diff --git a/.agents/hooks/enforce-git-hygiene.sh b/.agents/hooks/enforce-git-hygiene.sh new file mode 100755 index 00000000000..d125e2835be --- /dev/null +++ b/.agents/hooks/enforce-git-hygiene.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# PreToolUse hook for the Bash tool. +# Blocks dangerous git patterns that cause CI failures. +set -euo pipefail + +input=$(cat) +command=$(echo "$input" | jq -r '.tool_input.command // empty') + +if [ -z "$command" ]; then + exit 0 +fi + +# Block 'git add .' / 'git add -A' / 'git add --all' +# These stage package-lock.json and other generated files. +if echo "$command" | grep -qE 'git\s+add\s+(\.|--all|-A)(\s|$|;)'; then + echo "BLOCKED: do not use 'git add .' / 'git add -A' / 'git add --all'. Stage files explicitly by name." >&2 + exit 2 +fi + +exit 0 diff --git a/.agents/hooks/enforce-vendored.sh b/.agents/hooks/enforce-vendored.sh new file mode 100755 index 00000000000..d7404d7c66e --- /dev/null +++ b/.agents/hooks/enforce-vendored.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# PreToolUse hook for Edit and Write tools. +# Blocks edits to vendored content that must be fixed upstream. +set -euo pipefail + +input=$(cat) +file_path=$(echo "$input" | jq -r '.tool_input.file_path // empty') + +if [ -z "$file_path" ]; then + exit 0 +fi + +# Block edits to vendored Hugo modules. +if echo "$file_path" | grep -qE '/_vendor/'; then + echo "BLOCKED: _vendor/ is vendored from upstream Hugo modules. Fix in the source repo instead." >&2 + exit 2 +fi + +# Block edits to vendored CLI reference data. +if echo "$file_path" | grep -qE '/data/cli/'; then + echo "BLOCKED: data/cli/ is generated from upstream repos (docker/cli, docker/buildx, etc.). Fix in the source repo instead." >&2 + exit 2 +fi + +exit 0 diff --git a/.agents/settings.json b/.agents/settings.json new file mode 100644 index 00000000000..c4ca5481598 --- /dev/null +++ b/.agents/settings.json @@ -0,0 +1,26 @@ +{ + "hooks": { + "PreToolUse": [ + { + "matcher": "Edit|Write", + "hooks": [ + { + "type": "command", + "command": "bash .agents/hooks/enforce-vendored.sh" + } + ], + "description": "Block edits to vendored content (_vendor/, data/cli/)" + }, + { + "matcher": "Bash", + "hooks": [ + { + "type": "command", + "command": "bash .agents/hooks/enforce-git-hygiene.sh" + } + ], + "description": "Block git add . and dangerous patterns" + } + ] + } +} diff --git a/.agents/skills/agent-readiness-audit/SKILL.md b/.agents/skills/agent-readiness-audit/SKILL.md new file mode 100644 index 00000000000..afb2e56a3ad --- /dev/null +++ b/.agents/skills/agent-readiness-audit/SKILL.md @@ -0,0 +1,228 @@ +--- +name: agent-readiness-audit +description: > + Audit a documentation site for agent-friendliness: discovery, markdown + delivery, crawlability, semantic structure, machine-readable surfaces, + and content legibility. Use when asked to assess docs.docker.com or any + docs site for AI/agent readiness, produce a scored report, compare with + external scanners, or generate a remediation list. Triggers on: + "audit docs for agent readiness", "how agent-friendly is docs.docker.com", + "score our docs for AI agents", "review llms.txt / markdown / crawlability", + "create an agent-readiness remediation plan". +argument-hint: "" +--- + +# Agent Readiness Audit + +Audit the live site, not the source tree alone. Prefer the same fetch path +an external agent would use in the wild: direct HTTP requests, sitemap +sampling, and page-level inspection. + +Do not reduce the result to a homepage-only scan or a binary checklist. + +## 1. 
Set scope + +Use `$ARGUMENTS` as the base URL when provided. Otherwise infer the base +URL from context and state the assumption. + +Decide whether the host being audited is: + +- a docs-only host +- an app/tool host +- a mixed host + +This matters for optional checks such as MCP, plugin manifests, or other +tool discovery files. Do not penalize a docs-only host for missing +tooling manifests that belong on a separate service. + +For `docs.docker.com`, treat the public docs host as docs-only. Docker's +MCP server is published separately, so missing MCP files on the docs host +should be reported as `N/A`, not as a failure. + +## 2. Gather sitewide signals + +Always check these resources first: + +- `/llms.txt` +- `/llms-full.txt` +- `/robots.txt` +- `/sitemap.xml` + +Only check host-level tool manifests when the host is an app/tool host, +mixed host, or explicitly advertises them: + +- `/.well-known/ai-plugin.json` +- `/.well-known/agent.json` +- `/.well-known/agents.json` + +Use the bundled script for a baseline: + +```bash +bash .agents/skills/agent-readiness-audit/scripts/baseline-probes.sh \ + "$ARGUMENTS" +``` + +The script produces baseline evidence only. You still need to interpret +what matters for a docs property and score it with the rubric. + +For docs-only hosts, you may skip tool-manifest probes to reduce noise: + +```bash +CHECK_TOOL_MANIFESTS=0 \ + bash .agents/skills/agent-readiness-audit/scripts/baseline-probes.sh \ + "$ARGUMENTS" +``` + +## 3. Sample representative pages + +Use the sitemap when available. Do not rely on the homepage alone. + +If `llms.txt` exists, sample some URLs from it as well. This helps catch +stale or misleading discovery surfaces that a sitemap-only sample would miss. + +Sample at least 12 pages when the site is large enough, and cover multiple +page types: + +- homepage or docs landing page +- section landing pages +- task guides +- product manuals +- reference or API pages +- tutorial or learning pages + +If the sitemap is missing or unusable, discover pages through internal +links and note the lower confidence. + +If the site has distinct delivery patterns, sample each one. For example: + +- normal content pages +- generated reference pages +- versioned docs +- localized docs + +## 4. Run fetch-path checks on each sample + +For each sampled page, verify: + +- HTML fetch status, content type, and final URL +- `Accept: text/markdown` behavior +- direct markdown route behavior such as `.md` or another stable path +- page-level markdown alternate links and whether they actually resolve +- whether page actions such as "Open Markdown" agree with the working route +- whether the HTML title or H1 matches the markdown H1 closely enough for + retrieval parity +- whether main content is present in the initial HTML +- redirect chain length and canonical URL consistency +- obvious chrome/noise in the markdown response + +Do not assume a `.md` mirror exists just because another site uses one. +Verify the actual markdown path the site exposes. + +Treat these as separate signals: + +- negotiated markdown works +- a stable direct markdown URL works +- the page advertises the correct markdown URL + +If the page advertises dead markdown alternates but a working markdown route +exists, do not fail markdown delivery outright. Score it as a discoverability +and consistency problem instead. + +For API or generated reference pages, also verify whether a machine-readable +asset such as OpenAPI YAML is directly linked and fetchable. + +## 5. 
Judge structure and legibility + +Measure structural signals: + +- exactly one `h1` +- sane heading hierarchy +- `main` and `article` presence where appropriate +- canonical tags +- JSON-LD or breadcrumb structured data +- stable anchors and deep-linkable headings + +Also make a qualitative judgment about agent legibility: + +- markdown strips site chrome cleanly +- headings are specific and task-oriented +- code blocks stay intelligible without client-side JS +- the page is not dominated by banners, injected chat, or nav noise + +Measure code block labeling explicitly when code samples are common. A page +type with many untagged fenced blocks should lose points even if the prose is +otherwise clean. + +For page types that intentionally render interactive UIs with JavaScript, +judge them separately from normal docs pages. If the HTML shell is thin, +check whether the page still provides: + +- a fetchable markdown summary +- a directly linked machine-readable asset +- a usable non-JS fallback + +## 6. Score with the rubric + +Use [references/rubric.md](references/rubric.md). + +Rules: + +- score only what you verified +- mark non-applicable checks as `N/A` +- normalize the final score against applicable points only +- do not let optional manifest checks dominate the grade + +Apply the foundational caps from the rubric. A site with broken discovery +or broken markdown delivery should not earn a high grade because it has +clean metadata. + +Do not average away a weak page type. If one major page type, such as API +reference, is materially worse than the rest of the corpus, call it out as +the weakest segment and reflect it in the category notes. + +## 7. Compare with external scanners when useful + +If external scanner results are available, compare them to your live +findings. Treat them as secondary evidence. + +If a scanner and the live fetch disagree: + +- trust the live fetch +- report the mismatch explicitly +- explain whether the scanner is testing a different assumption + +## 8. Produce a remediation list + +Turn findings into a short backlog: + +- `P0`: fetchability or discovery blockers +- `P1`: recurring structural or parity issues +- `P2`: polish, optional manifests, or low-impact enhancements + +For each remediation, include: + +- the failing signal +- why it matters to agents +- a concrete fix +- whether it is sitewide or page-type-specific + +## 9. Report in a stable format + +Use [references/report-template.md](references/report-template.md). + +Always include: + +- overall score and grade +- confidence level +- sampled URLs or sample strategy +- category scores +- highest-priority findings +- remediation backlog + +## Notes + +- Favor docs-delivery checks over marketing-site heuristics. +- Do not fail a docs host for lacking MCP or plugin manifests unless the + host itself is meant to expose tools. +- Treat raw byte size as supporting evidence, not as a primary scoring input. +- Prefer short evidence excerpts and commands over long copied page text. diff --git a/.agents/skills/agent-readiness-audit/references/report-template.md b/.agents/skills/agent-readiness-audit/references/report-template.md new file mode 100644 index 00000000000..e1592eb5054 --- /dev/null +++ b/.agents/skills/agent-readiness-audit/references/report-template.md @@ -0,0 +1,63 @@ +# Agent Readiness Report Template + +Use this structure for final audit output. 
+ +```markdown +## Agent Readiness Audit + +**Site:** +**Date:** +**Overall score:** /100 +**Grade:** +**Confidence:** + +### Summary + +<2-4 sentence verdict focused on what an external agent can actually +discover, fetch, and interpret on this site.> + +### Category Scores + +| Category | Score | Notes | +| --- | ---: | --- | +| Discovery and policy | / | | +| Retrieval and markdown delivery | / | | +| Structure and semantics | / | | +| Crawlability and delivery behavior | / | | +| Machine-readable surfaces | / | | +| Content legibility | / | | + +### Sample + +- Sample strategy: +- Sampled pages: +- Page types covered: +- Weakest page type: + +### Findings + +- `P0`: +- `P1`: +- `P2`: + +### Remediation + +- `P0`: , because +- `P1`: , because +- `P2`: , because + +### Evidence + +- Sitewide checks: +- Fetch-path checks: +- Structural checks:
+- Code block checks: +- Scanner comparison: +``` + +## Notes + +- Keep the summary short and outcome-oriented. +- Findings should refer to concrete URLs or page types. +- If a criterion is `N/A`, say why instead of leaving it blank. diff --git a/.agents/skills/agent-readiness-audit/references/rubric.md b/.agents/skills/agent-readiness-audit/references/rubric.md new file mode 100644 index 00000000000..51f089ed613 --- /dev/null +++ b/.agents/skills/agent-readiness-audit/references/rubric.md @@ -0,0 +1,129 @@ +# Agent Readiness Rubric + +Score the site on a 100-point scale before normalization. If a criterion is +not applicable, remove its points from the denominator instead of treating +it as failed. + +## Grade bands + +- `A`: 90-100 +- `B`: 80-89 +- `C`: 65-79 +- `D`: 50-64 +- `F`: below 50 + +## Confidence levels + +- `High`: sitemap available and at least 12 sampled pages across at least + four page types +- `Medium`: six to 11 sampled pages, or weaker coverage of page types +- `Low`: fewer than six sampled pages, or homepage-biased sampling + +## Foundational caps + +Apply these after computing the raw score: + +- No `sitemap.xml` and no `llms.txt`: maximum grade `C` +- Markdown delivery fails on most sampled pages and no usable alternate + markdown path exists: maximum grade `D` +- Main content is missing from initial HTML on more than 25% of sampled + pages: maximum grade `D` +- `robots.txt` blocks broad crawl access to the docs site and the block is + not clearly intentional: maximum grade `F` + +Optional manifest gaps alone must not drop a docs-only host below `B`. + +## Categories + +### 1. Discovery and policy - 15 points + +- `5` `llms.txt` exists, is fetchable, and is useful for agent discovery +- `4` `sitemap.xml` exists and includes the main docs corpus +- `4` `robots.txt` is accessible and does not unintentionally block major + crawl agents or search agents +- `2` curated bulk-discovery aid exists, such as `llms-full.txt` or an + equivalent machine-readable catalog + +When `llms.txt` exists, sample some URLs from it. Stale or misleading +discovery links should reduce this category even if the file itself exists. + +### 2. Retrieval and markdown delivery - 25 points + +- `8` `Accept: text/markdown` works on sampled pages or an equivalent + negotiated markdown response exists +- `5` a stable direct markdown route works on sampled pages +- `5` page-level markdown hints, alternates, or UI actions point to a + working markdown URL +- `4` markdown responses strip navigation chrome and preserve headings, + links, and code blocks cleanly +- `3` HTML and markdown stay in parity across the sampled set + +### 3. Structure and semantics - 20 points + +- `6` sampled pages have one `h1` and a mostly consistent heading hierarchy +- `5` `main` or `article` marks the primary content and the content is + present in the initial HTML +- `4` canonical tags and stable final URLs are correct +- `3` structured data such as breadcrumbs or article metadata exists where + appropriate +- `2` headings expose stable anchors or deep-link targets, and the HTML title + or H1 stays reasonably aligned with the markdown H1 + +### 4. Crawlability and delivery behavior - 15 points + +- `5` crawl directives are sane for a public docs property +- `4` the site does not depend on client-side rendering to expose core + content +- `3` cache and freshness signals are reasonable for bots, such as + `ETag`, `Last-Modified`, or useful cache headers +- `3` redirect chains are short and predictable + +### 5. 
Machine-readable surfaces - 10 points + +- `4` API or reference sections expose OpenAPI, schema, or downloadable + machine-readable assets where relevant +- `3` pages with interactive JavaScript reference UIs still provide a usable + non-JS fallback such as markdown, YAML, or another directly linked asset +- `3` tool manifests such as MCP, plugin, or agent descriptors exist only + when the audited host is actually meant to expose tools + +### 6. Content legibility - 15 points + +- `5` markdown is clean and low-noise rather than a dump of site chrome +- `4` headings and section intros are specific enough for retrieval and + chunking +- `3` fenced code blocks are mostly language-tagged and remain copyable and + interpretable +- `3` repeated banners, chat chrome, consent overlays, or other boilerplate + do not overwhelm the main content + +## Scoring guidance + +Use the full category only when the signal is consistently good across the +sample. Partial credit is expected. + +Examples: + +- A sitewide `llms.txt` that exists but is stale or too shallow may earn + partial credit rather than full credit. +- If markdown works only on some page types, score that criterion based on + observed coverage instead of failing or passing it outright. +- If a working markdown route exists but the page advertises a dead + alternate URL, deduct in markdown discoverability rather than in raw + markdown availability. +- If `llms.txt` exists but points to stale, broken, or inconsistent paths, + deduct in discovery rather than in core fetchability. +- If tool manifests are irrelevant to the host, mark them `N/A`. +- If a major page type is weaker than the rest of the site, note that + explicitly instead of letting stronger page types hide it in the average. + +## Reporting guidance + +For every category, include one line that explains the score: + +- what was tested +- what passed +- what limited the score + +Use evidence from live fetches. Do not score from assumptions about the +framework or source repository. diff --git a/.agents/skills/agent-readiness-audit/scripts/baseline-probes.sh b/.agents/skills/agent-readiness-audit/scripts/baseline-probes.sh new file mode 100755 index 00000000000..50875aee068 --- /dev/null +++ b/.agents/skills/agent-readiness-audit/scripts/baseline-probes.sh @@ -0,0 +1,287 @@ +#!/usr/bin/env bash + +set -euo pipefail + +if [[ $# -lt 1 ]]; then + echo "usage: $0 [sample-url ...]" >&2 + exit 1 +fi + +if ! command -v curl >/dev/null 2>&1; then + echo "curl is required" >&2 + exit 1 +fi + +if ! 
command -v rg >/dev/null 2>&1; then + echo "rg is required" >&2 + exit 1 +fi + +BASE_URL="${1%/}" +shift || true +SAMPLE_SIZE="${SAMPLE_SIZE:-12}" +LLMS_SAMPLE_SIZE="${LLMS_SAMPLE_SIZE:-2}" +CHECK_TOOL_MANIFESTS="${CHECK_TOOL_MANIFESTS:-1}" +TMPDIR="$(mktemp -d)" +trap 'rm -rf "$TMPDIR"' EXIT + +count_matches() { + local pattern="$1" + local file="$2" + rg -o "$pattern" "$file" 2>/dev/null | wc -l | tr -d ' ' || true +} + +header_value() { + local header_file="$1" + local name="$2" + awk -F': ' -v target="$name" ' + tolower($1) == tolower(target) { value = $2 } + END { + gsub(/\r/, "", value) + print value + } + ' "$header_file" +} + +normalize_text() { + printf '%s' "$1" \ + | tr '[:upper:]' '[:lower:]' \ + | sed -E 's/[[:space:]]+/ /g; s/^[[:space:]]+//; s/[[:space:]]+$//; s/ \| docker docs$//' +} + +code_fence_stats() { + local file="$1" + awk ' + BEGIN { in_block = 0; total = 0; tagged = 0 } + /^```/ { + line = $0 + sub(/^```[[:space:]]*/, "", line) + if (!in_block) { + total++ + if (line != "") { + tagged++ + } + in_block = 1 + } else { + in_block = 0 + } + } + END { + printf "%d\t%d\n", total, tagged + } + ' "$file" +} + +resource_probe() { + local url="$1" + local label="$2" + local body="$TMPDIR/resource-body" + local headers="$TMPDIR/resource-headers" + local status + local content_type + local bytes + + status="$(curl -sS -L -o "$body" -D "$headers" -w '%{http_code}' "$url" || true)" + content_type="$(header_value "$headers" "content-type")" + bytes="$(wc -c < "$body" | tr -d ' ')" + + printf '%s\t%s\t%s\t%s\t%s\n' "$label" "$url" "$status" "$content_type" "$bytes" +} + +page_probe() { + local url="$1" + local html="$TMPDIR/page-html" + local html_headers="$TMPDIR/page-html-headers" + local md="$TMPDIR/page-md" + local md_headers="$TMPDIR/page-md-headers" + local direct_md="$TMPDIR/page-direct-md" + local direct_md_headers="$TMPDIR/page-direct-md-headers" + local alt_md="$TMPDIR/page-alt-md" + local alt_md_headers="$TMPDIR/page-alt-md-headers" + local status + local content_type + local final_url + local h1_count + local main_count + local article_count + local canonical_count + local jsonld_count + local md_alt + local md_alt_url + local direct_md_url + local html_title + local html_h1 + local md_h1 + local md_status + local md_content_type + local md_bytes + local direct_md_status + local direct_md_content_type + local md_alt_status="na" + local md_alt_content_type="na" + local title_md_h1_match="no" + local html_h1_md_h1_match="no" + local code_blocks_total + local code_blocks_tagged + + status="$( + curl -sS -L -o "$html" -D "$html_headers" \ + -w '%{http_code}\t%{url_effective}' "$url" || true + )" + content_type="$(header_value "$html_headers" "content-type")" + final_url="${status#*$'\t'}" + status="${status%%$'\t'*}" + + h1_count="$(count_matches ']' "$html")" + main_count="$(count_matches ']' "$html")" + article_count="$(count_matches ']' "$html")" + canonical_count="$(count_matches 'rel=canonical' "$html")" + jsonld_count="$(count_matches 'application/ld\+json' "$html")" + md_alt="$( + rg -o 'type=text/markdown href=[^ >]+|href=[^ >]+[^>]*type=text/markdown' \ + "$html" -m 1 2>/dev/null | sed -E 's/.*href=([^ >]+).*/\1/' || true + )" + + md_status="$(curl -sS -L -H 'Accept: text/markdown' -o "$md" -D "$md_headers" -w '%{http_code}' "$url" || true)" + md_content_type="$(header_value "$md_headers" "content-type")" + md_bytes="$(wc -c < "$md" | tr -d ' ')" + direct_md_url="$(printf '%s' "$final_url" | sed 's#/$##').md" + direct_md_status="$(curl -sS -L -o "$direct_md" -D 
"$direct_md_headers" -w '%{http_code}' "$direct_md_url" || true)" + direct_md_content_type="$(header_value "$direct_md_headers" "content-type")" + html_title="$(rg -o '[^<]+' "$html" -m 1 2>/dev/null | sed 's/<title>//' || true)" + html_h1="$(rg -o '<h1[^>]*>[^<]+' "$html" -m 1 2>/dev/null | sed -E 's/<h1[^>]*>//' || true)" + md_h1="$(awk '/^# / { sub(/^# /, ""); print; exit }' "$md" || true)" + + if [[ -n "$html_title" && -n "$md_h1" ]]; then + if [[ "$(normalize_text "$html_title")" == "$(normalize_text "$md_h1")" ]]; then + title_md_h1_match="yes" + fi + fi + + if [[ -n "$html_h1" && -n "$md_h1" ]]; then + if [[ "$(normalize_text "$html_h1")" == "$(normalize_text "$md_h1")" ]]; then + html_h1_md_h1_match="yes" + fi + fi + + IFS=$'\t' read -r code_blocks_total code_blocks_tagged < <(code_fence_stats "$md") + + if [[ -n "$md_alt" ]]; then + if [[ "$md_alt" =~ ^https?:// ]]; then + md_alt_url="$md_alt" + elif [[ "$md_alt" == /* ]]; then + md_alt_url="${BASE_URL}${md_alt}" + else + md_alt_url="${BASE_URL}/${md_alt}" + fi + md_alt_status="$(curl -sS -L -o "$alt_md" -D "$alt_md_headers" -w '%{http_code}' "$md_alt_url" || true)" + md_alt_content_type="$(header_value "$alt_md_headers" "content-type")" + else + md_alt_url="na" + fi + + printf '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' \ + "$url" \ + "$status" \ + "$content_type" \ + "$final_url" \ + "$h1_count" \ + "$main_count" \ + "$article_count" \ + "$canonical_count" \ + "$jsonld_count" \ + "$md_status" \ + "$md_content_type" \ + "$md_bytes" \ + "$direct_md_url" \ + "$direct_md_status" \ + "$direct_md_content_type" \ + "$md_alt_url" \ + "$md_alt_status" \ + "$md_alt_content_type" \ + "$title_md_h1_match" \ + "$html_h1_md_h1_match" \ + "$code_blocks_total" \ + "$code_blocks_tagged" +} + +llms_urls() { + local llms="$TMPDIR/llms-sample.txt" + local llms_status + + llms_status="$(curl -sS -L -o "$llms" -w '%{http_code}' "$BASE_URL/llms.txt" || true)" + if [[ "$llms_status" == "200" ]]; then + rg -o '\(https?://[^)]+\)' "$llms" 2>/dev/null \ + | tr -d '()' \ + | rg "^${BASE_URL//./\\.}" \ + | rg -v '/404\.html$|/search/?$|\.xml$|\.txt$' \ + | awk -v limit="$LLMS_SAMPLE_SIZE" '!seen[$0]++ && NR <= limit { print }' + fi +} + +sitemap_urls() { + local sitemap="$TMPDIR/sitemap.xml" + local sitemap_status + + sitemap_status="$(curl -sS -L -o "$sitemap" -w '%{http_code}' "$BASE_URL/sitemap.xml" || true)" + if [[ "$sitemap_status" == "200" ]]; then + rg -o '<loc>[^<]+' "$sitemap" \ + | sed 's/<loc>//' \ + | rg "^${BASE_URL//./\\.}" \ + | rg -v '/404\.html$|/search/?$|\.xml$|\.txt$' \ + | awk '!seen[$0]++ { print }' + fi +} + +sample_urls() { + if [[ $# -gt 0 ]]; then + printf '%s\n' "$@" + return + fi + + local sample_file="$TMPDIR/sampled-urls.txt" + + { + llms_urls + sitemap_urls + } | awk -v limit="$SAMPLE_SIZE" ' + !seen[$0]++ { + print + count++ + if (count >= limit) { + exit + } + } + ' > "$sample_file" + + if [[ ! 
-s "$sample_file" ]]; then + printf '%s/\n' "$BASE_URL" + else + cat "$sample_file" + fi +} + +printf 'META\tbase-url\t%s\n' "$BASE_URL" +if [[ $# -gt 0 ]]; then + printf 'META\tsample-source\texplicit\n' +else + printf 'META\tsample-source\tllms-and-sitemap-or-homepage\n' +fi +printf '\nSITEWIDE\n' +printf 'label\turl\tstatus\tcontent-type\tbytes\n' +resource_probe "$BASE_URL/llms.txt" "llms.txt" +resource_probe "$BASE_URL/llms-full.txt" "llms-full.txt" +resource_probe "$BASE_URL/robots.txt" "robots.txt" +resource_probe "$BASE_URL/sitemap.xml" "sitemap.xml" +if [[ "$CHECK_TOOL_MANIFESTS" == "1" ]]; then + resource_probe "$BASE_URL/.well-known/ai-plugin.json" "ai-plugin.json" + resource_probe "$BASE_URL/.well-known/agent.json" "agent.json" + resource_probe "$BASE_URL/.well-known/agents.json" "agents.json" +fi + +printf '\nPAGES\n' +printf 'url\tstatus\tcontent-type\tfinal-url\th1\tmain\tarticle\tcanonical\tjsonld\tmd-negotiate-status\tmd-negotiate-content-type\tmd-bytes\tmd-direct-url\tmd-direct-status\tmd-direct-content-type\tmd-alt-url\tmd-alt-status\tmd-alt-content-type\ttitle-md-h1-match\th1-md-h1-match\tcode-blocks-total\tcode-blocks-tagged\n' +while IFS= read -r page_url; do + [[ -z "$page_url" ]] && continue + page_probe "$page_url" +done < <(sample_urls "$@") diff --git a/.agents/skills/check-pr/SKILL.md b/.agents/skills/check-pr/SKILL.md new file mode 100644 index 00000000000..15c76cde1ca --- /dev/null +++ b/.agents/skills/check-pr/SKILL.md @@ -0,0 +1,114 @@ +--- +name: check-pr +description: > + Check a single PR's CI status, review comments, and requested changes. + Fix actionable failures and address feedback. "check PR 1234", "what's + the status of my PR", "address review comments on #500". +argument-hint: "<pr-number>" +context: fork +--- + +# Check PR + +Do one pass over PR **$ARGUMENTS**: check CI, read reviews, fix what's +actionable, report status. + +## 1. Gather PR state + +```bash +# Overall state +gh pr view $ARGUMENTS --repo docker/docs --json state,title,url,headRefName + +# CI checks +gh pr checks $ARGUMENTS --repo docker/docs --json name,state,detailsUrl + +# Top-level reviews +gh pr view $ARGUMENTS --repo docker/docs --json reviews,reviewDecision + +# Inline (line-level) comments — NOT included in the above +gh api repos/docker/docs/pulls/$ARGUMENTS/comments \ + --jq '[.[] | {id: .id, author: .user.login, body: .body, path: .path, line: .line}]' +``` + +Always check both the reviews endpoint and the inline comments endpoint. +A review with an empty body may still have line-level comments requiring +action. + +## 2. If merged + +Report the final state. Then check for any unanswered review comments (both +top-level and inline) and reply to each one explaining what was done or that +the issue was addressed in a follow-up. Skip to step 6 after. + +## 3. If closed without merge + +Read the closing context to understand why: + +```bash +gh pr view $ARGUMENTS --repo docker/docs --json closedAt,comments \ + --jq '{closedAt, lastComment: .comments[-1].body}' +``` + +Report the reason. Common causes: rejected by maintainers, superseded by +another PR, closed by automation. + +## 4. If CI is failing + +- Read the failure details (follow `detailsUrl` if needed) +- Determine if the failure is in the PR's changed files or pre-existing +- **Actionable:** check out the branch, fix, commit, push + ```bash + git checkout <branch> + # fix the issue + git add <files> + git commit -m "fix: <description>" + git push + ``` +- **Pre-existing / upstream:** note it, do not block + +## 5. 
If review comments or changes requested + +- Read each unresolved comment +- Address feedback in a follow-up commit +- Push, then reply to each comment explaining what was done: + ```bash + gh api repos/docker/docs/pulls/$ARGUMENTS/comments \ + --method POST \ + --field in_reply_to=<comment-id> \ + --field body="<response>" + ``` +- End every comment reply with an accurate agent-disclosure footer that names + the active coding agent, for example `Generated by Codex` or `Generated by + Claude Code`. +- Resolve each thread via GraphQL after replying: + ```bash + # Get thread IDs + gh api graphql -f query=' + query($owner:String!, $repo:String!, $pr:Int!) { + repository(owner:$owner, name:$repo) { + pullRequest(number:$pr) { + reviewThreads(first:50) { + nodes { id isResolved comments(first:1) { nodes { path } } } + } + } + } + }' -f owner=docker -f repo=docs -F pr=$ARGUMENTS \ + --jq '.data.repository.pullRequest.reviewThreads.nodes[] | select(.isResolved == false) | {id, path: .comments.nodes[0].path}' + + # Resolve a thread + gh api graphql -f query=' + mutation($id:ID!) { resolveReviewThread(input:{threadId:$id}) { thread { isResolved } } } + ' -f id=<thread-id> + ``` +- Re-request review if changes were requested + +## 6. Report + +``` +## PR #$ARGUMENTS: <title> + +**State:** <open|merged|closed> +**CI:** <passing|failing|pending> +**Review:** <approved|changes requested|pending> +**Action taken:** <what was done, or "none needed"> +``` diff --git a/.agents/skills/create-lab-guide/SKILL.md b/.agents/skills/create-lab-guide/SKILL.md new file mode 100644 index 00000000000..877c4e3f217 --- /dev/null +++ b/.agents/skills/create-lab-guide/SKILL.md @@ -0,0 +1,126 @@ +--- +name: create-lab-guide +description: > + Create a guide page for a Labspace. This includes writing the markdown content for the guide, + structuring it according to Docker docs conventions, and ensuring it provides clear instructions + and information about the Labspace. Includes learning about the lab itself, extracting out its + learning objectives, and combining all of that into a well-structured guide markdown file. +--- + +# Create Lab Guide + +You are creating a new guide page for a labspace. The guide should be structured according to Docker docs conventions, +with clear sections, learning objectives, and instructions for users to get the most out of the lab. + +## Inputs + +The user provides one or more guides to migrate. Resolve these from the inventory below: + +- **REPO_NAME**: GitHub repo in the `dockersamples` org (e.g. `labspace-ai-fundamentals`) + +## Step 1: Clone the labspace repo + +Clone the guide repo to a temporary directory. This gives you all source files locally — no HTTP calls needed. + +```bash +git clone --depth 1 https://github.com/dockersamples/{REPO_NAME}.git <tmpdir>/{REPO_NAME} +``` + +Where `<tmpdir>` is a temporary directory on your system (e.g. the output of `mktemp -d`). + +## Step 2: Learn and extract key information about the lab + +The repo structure is: + +- `<tmpdir>/{REPO_NAME}/README.md` — the main README for the lab +- `<tmpdir>/{REPO_NAME}/labspace/labspace.yaml` — a YAML document outlining details of the lab, including the sections/modules and the path to their content +- `<tmpdir>/{REPO_NAME}/labspace/*.md` — the content for each section/module (only reference the files specified in `labspace.yaml`) +- `<tmpdir>/{REPO_NAME}/.github/workflows/` — the GHA workflow that publishes the labspace. 
It includes the repo URL for the published Compose file, which will be useful for the "launch" command +- `<tmpdir>/{REPO_NAME}/compose.override.yaml` - lab-specific Compose customizations + +1. Read `README.md` to understand the purpose of the lab. +2. Read the `labspace/labspace.yaml` to understand the structure of the lab and its sections/modules. +3. Read the `labspace/*.md` files to extract the learning objectives, instructions, and any code snippets. +4. Extract a short description that can be used for the `description` and `summary` fields in the guide markdown. +5. Determine if a model will be pulled when starting the lab by looking at the `compose.override.yaml` file and looking for the any top-level `model` specifications. + + +## Step 2: Write the guide markdown + +The markdown file must be located in the `guides/` directory and have a filename of `lab-{GUIDE_ID}.md`. + +Sample markdown structure, including frontmatter and content: + +```markdown +--- +title: "Lab: { Short title }" +linkTitle: "Lab: { Short title }" +description: | + A short description of the lab for SEO and social sharing. +summary: | + A short summary of the lab for the guides listing page. 2-3 lines. +keywords: AI, Docker, Model Runner, agentic apps, lab, labspace +aliases: # Include if the lab is an AI-related lab + - /labs/docker-for-ai/{REPO_NAME_WITHOUT_LABSPACE_PREFIX}/ +params: + tags: [ai, labs] + time: 20 minutes + resource_links: + - title: A resource link pointing to relevant documentation or code + url: /ai/model-runner/ + - title: Labspace repository + url: https://github.com/dockersamples/{REPO_NAME} +--- + +Short explanation of the lab and what it covers. + +## Launch the lab + +{{< labspace-launch image="dockersamples/{REPO_NAME}" >}} + +## What you'll learn + +By the end of this Labspace, you will have completed the following: + +- Objective #1 +- Objective #2 +- Objective #3 +- Objective #4 + +## Modules + +| # | Module | Description | +|---|--------|-------------| +| 1 | Module #1 | Description of module #1 | +| 2 | Module #2 | Description of module #2 | +| 3 | Module #3 | Description of module #3 | +| 4 | Module #4 | Description of module #4 | +| 5 | Module #5 | Description of module #5 | +| 6 | Module #6 | Description of module #6 | +``` + +Important notes: + +- The learning objectives should be based on the content of the labspace as a whole. +- The modules should be based on the sections/modules outlined in `labspace.yaml`. +- All lab guides _must_ have a tag of `labs` +- If the lab is AI-related, it should also have a tag of `ai` and aliases for `/labs/docker-for-ai/{REPO_NAME}/` +- If the lab pulls a model, add a `model-download: true` parameter to the `labspace-launch` shortcode to show a warning about model downloads. + + +## Step 3: Apply Docker docs style rules + +These are mandatory (from STYLE.md and AGENTS.md): + +- **No "we"**: "We are going to create" → "Create" or "Start by creating" +- **No "let us" / "let's"**: → imperative voice or "You can..." +- **No hedge words**: remove "simply", "easily", "just", "seamlessly" +- **No meta-commentary**: remove "it's worth noting", "it's important to understand" +- **No "allows you to" / "enables you to"**: → "lets you" or rephrase +- **No "click"**: → "select" +- **No bold for emphasis or product names**: only bold UI elements +- **No time-relative language**: remove "currently", "new", "recently", "now" +- **No exclamations**: remove "Voila!!!" etc. 
+- Use `console` language hint for interactive shell blocks with `$` prompts +- Use contractions: "it's", "you're", "don't" + diff --git a/.agents/skills/create-pr/SKILL.md b/.agents/skills/create-pr/SKILL.md new file mode 100644 index 00000000000..33ae4468e57 --- /dev/null +++ b/.agents/skills/create-pr/SKILL.md @@ -0,0 +1,139 @@ +--- +name: create-pr +description: > + Push the current branch and create a pull request against docker/docs. + Use after changes are committed and reviewed. "create a PR", "submit the + fix", "open a pull request for this". +--- + +# Create PR + +Push the branch and create a properly structured pull request. + +## 1. Verify the branch + +```bash +git log --oneline main..HEAD # confirm commits exist +git diff --quiet # confirm no unstaged changes +``` + +## 2. Push the branch + +Confirm origin points to your fork, not upstream: + +```bash +git remote get-url origin +``` + +Then push: + +```bash +git push -u origin <branch-name> +``` + +## 3. Create the PR + +Before creating a PR for an issue, check whether that issue already has an open +linked PR: + +```bash +gh api repos/docker/docs/issues/<issue-number>/timeline --paginate \ + --jq '.[] | select((.event=="cross-referenced" or .event=="connected" or .event=="referenced") and .source.issue.pull_request and .source.issue.state=="open") | {url: .source.issue.html_url, title: .source.issue.title}' +``` + +If this returns an open PR that addresses the same issue, stop. Don't open a +duplicate PR; report the existing PR instead. Only proceed if there is no open +linked PR, or if the existing PR clearly does not address the issue and you +explain why in the new PR body. + +Derive the fork owner dynamically: + +```bash +FORK_OWNER=$(git remote get-url origin | sed -E 's|.*[:/]([^/]+)/[^/]+(\.git)?$|\1|') +``` + +```bash +gh pr create --repo docker/docs \ + --head "${FORK_OWNER}:<branch-name>" \ + --title "<concise summary under 70 chars>" \ + --body "$(cat <<'EOF' +## Summary + +<1-2 sentences: what was wrong and what was changed> + +Closes #NNNN + +Generated by <active coding agent name> +EOF +)" +``` + +Keep the body short. Reviewers need to know what changed and why — nothing +else. + +Use an accurate disclosure footer that names the active coding agent, for +example `Generated by Codex` or `Generated by Claude Code`. + +### Optional: Learnings section + +If while working on this PR you discovered something non-obvious about the +repo — a convention not documented in AGENTS.md, a gotcha that tripped you +up, a pattern that should be codified — add a Learnings section to the PR +body: + +```markdown +## Learnings + +- <what you learned and why it matters> +``` + +Add this section between the Summary and the `Closes` line. Only include +learnings that would help future contributors avoid the same issue. Do not +include things already documented in AGENTS.md or STYLE.md. + +The weekly PR learnings scanner reads these sections to surface recurring +patterns for the team to codify. + +## 4. 
Apply labels and request review + +Use the Issues API for labels — `gh pr edit --add-label` silently fails: + +```bash +gh api repos/docker/docs/issues/<pr-number>/labels \ + --method POST \ + --field 'labels[]=status/review' +``` + +Request review: + +```bash +gh pr edit <pr-number> --repo docker/docs --add-reviewer docker/docs-team +``` + +Verify the reviewer was assigned: + +```bash +gh pr view <pr-number> --repo docker/docs --json reviewRequests \ + --jq '.reviewRequests[].slug' +``` + +If the team doesn't appear, use the API directly: + +```bash +gh api repos/docker/docs/pulls/<pr-number>/requested_reviewers \ + --method POST --field 'team_reviewers[]=docs-team' +``` + +## 5. Report + +Print the PR URL and current CI state: + +```bash +gh pr view <pr-number> --repo docker/docs --json url,state +gh pr checks <pr-number> --repo docker/docs --json name,state +``` + +## Notes + +- Always use `Closes #NNNN` (not "Fixes") for GitHub auto-close linkage +- One issue, one branch, one PR — never combine diff --git a/.agents/skills/fix-issue/SKILL.md b/.agents/skills/fix-issue/SKILL.md new file mode 100644 index 00000000000..4f32fe91f5b --- /dev/null +++ b/.agents/skills/fix-issue/SKILL.md @@ -0,0 +1,85 @@ +--- +name: fix-issue +description: > + Fix a single GitHub issue end-to-end: triage, research, write the fix, + review, and create a PR. Use when asked to fix an issue: "fix issue 1234", + "resolve #500", "create a PR for issue 200". +argument-hint: "<issue-number>" +--- + +# Fix Issue + +Given GitHub issue **$ARGUMENTS**, decide what to do with it and either +close it or fix it. This skill orchestrates the composable skills — it owns +the decision tree, not the individual steps. + +## 1. Triage + +Invoke `/triage-issue $ARGUMENTS` to understand the issue and decide what +to do. This runs in a forked subagent and returns a verdict. + +## 2. Act on the triage result + +If triage says **close it** — comment with the reason and close: +```bash +gh issue close $ARGUMENTS --repo docker/docs \ + --comment "<one sentence explaining why> + +Generated by <active coding agent name>" +``` +Done. + +If triage says **escalate upstream** — comment noting the repo and stop: +```bash +gh issue comment $ARGUMENTS --repo docker/docs \ + --body "This needs to be fixed in <upstream-repo>. + +Generated by <active coding agent name>" +``` +Done. + +If triage says **leave it open** — comment explaining what was checked and +what's unclear. Do not close. +Done. + +End every issue comment with an accurate agent-disclosure footer that names +the active coding agent, for example `Generated by Codex` or `Generated by +Claude Code`. + +If triage says **fix it** — proceed to step 3. + +## 3. Research + +Invoke `/research` to locate affected files, verify facts, and identify +the fix. The issue context carries over from triage. This runs inline — +findings stay in conversation context for the write step. + +If research reveals the issue is upstream or cannot be fixed (e.g. +unverifiable URLs), comment on the issue and stop. + +## 4. Write + +Invoke `/write` to create a branch, make the change, format, self-review, +and commit. + +## 5. Review + +Invoke `/review-changes` to check the diff for correctness, coherence, and +mechanical compliance. This runs in a forked subagent with fresh context. + +If issues are found, fix them and re-review until clean. + +## 6. Create PR + +Invoke `/create-pr` to push the branch and open a pull request. + +## 7. Return to main + +```bash +git checkout main +``` + +## 8. 
Report + +Summarize what happened: the issue number, what was done (closed, escalated, +fixed with a PR link), and why — in a sentence or two. diff --git a/.agents/skills/migrate-content-ia/SKILL.md b/.agents/skills/migrate-content-ia/SKILL.md new file mode 100644 index 00000000000..cf0bf697ee8 --- /dev/null +++ b/.agents/skills/migrate-content-ia/SKILL.md @@ -0,0 +1,384 @@ +--- +name: migrate-content-ia +description: > + Handle Hugo docs information-architecture moves: discover old vs new URLs, + add front matter aliases (Phase 1), update in-repo links (Phase 2), interactive + List 2 resolution and fragment validation (Phase 3; no guessing). Supports + PR-scoped mapping plus whole-content sweeps for inbound links to that mapping, + or a full-site follow-up. Triggers on: "IA migration", "redirects for moved + pages", "fix links after content move", "PR-scoped link/anchor pass", + "aliases for old URLs". After branch work, chain the review-changes skill + (main...HEAD) before a PR. Agents must run the in-file required procedure + and definition of done, not the phases alone in isolation. +--- + +# Migrate content IA (redirects + links + anchors) + +Use this skill when pages **move or rename** under `content/` and you must +preserve old public URLs and/or fix cross-references. Work in **phases**; +choose **PR-scoped** vs **full-site** mode per run. + +**Read first:** **CLAUDE.md** / **AGENTS.md** (URL rules, vendored areas, external +links, special cases) and **hugo.yaml** (`permalinks`, `refLinksErrorLevel`, +`disablePathToLower`). For **prose and link text**, follow **STYLE.md**; for +**components, front matter, and link examples**, follow **COMPONENTS.md**. + +**Related skills:** **research** helps map moves and find inbound links; **write** +commits minimal edits. Run this skill’s phases after the move is identified (or +in parallel with research for large IA work). + +## Agent: required procedure (do not skip) + +**Common mistake (wrong):** use **`git diff main...HEAD` (or the PR’s file +list) as the full set of places to fix links** for a migration. That set shows +**what *moved***; it is **not** the list of every page that **points *to*** a +moved page. Inbound stragglers are often in files the PR **never** touched. You +must still **sweep the repo** for every string in the **old path and published-URL set** +for this run, not only for “files in the diff.” + +**Definition of done (when the migration is *finished*):** **Both** of the +following (unless the user or **AGENTS.md** **explicitly defers** a **List 2** +item in **Phase 3**; document the deferral): + +1. **`docker buildx bake validate`** passes for the branch, with no new + build/link errors from this work. +2. A **sweep of the old path and published-URL set for this run** (see + [Sweep commands](#sweep-commands) below) finds **no** remaining + migration-relevant **inbound** reference—**including**: + - links to an old **source** path (plain `.md` and equivalent `ref` forms), + - links that use the old path **and** a `#fragment`, + - and, where your mapping includes them, old **published-style** `link:` / + `url:` / full-site URL strings, + **except** intentional entries to keep: for example `aliases` on the **new** + canonical page, or **redirects.yml** *sources* you must not edit per policy. + (A hit on a **source** that is only an `alias` line on the new page is + **expected**—do not “fix” that away; distinguish alias rows from straggler + links in body or nav config.) 
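A minimal sketch of one sweep pass for a single mapping row, assuming a hypothetical move from `manuals/foo/old-page.md` (published at `/foo/old-page/`); substitute the real old paths and URLs for this run:

```bash
# Hypothetical old targets for one row of the mapping; replace with real values.
OLD_SRC='manuals/foo/old-page.md'
OLD_URL='/foo/old-page/'

# Source-path references; hits that also carry a #fragment belong on List 2.
rg -n --fixed-strings "$OLD_SRC" content/ data/ layouts/

# Published-style references in front matter, nav config, or hardcoded strings.
rg -n --fixed-strings "$OLD_URL" content/ data/ layouts/
```

Alias entries on the new canonical page are expected hits; every other hit needs a phase assignment or an explicit deferral.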
+ +**Chaining (policy):** when this branch’s content work is ready for handoff, +**run the [review-changes](../review-changes/SKILL.md) skill** on +**`main...HEAD`** (or **`merge-base`…`HEAD`** for a different target branch) so +the **whole branch** is re-read for cross-page issues before opening a PR. Do +not treat phases 0–3 alone as the final check. + +**Run in order (mandatory for agents):** + +1. **Scope the moves (mapping input):** set the Git range like **review-changes** + (for a PR to `main`: `git diff --name-only main...HEAD`; for another target: + `BASE=$(git merge-base <target-branch> HEAD)` then + `git diff --name-only $BASE...HEAD`, as in **Phase 0.5**). Include + renames; build the **old → new** table (source and published) per **Phase + 0**. +2. **Sweep and list:** for every **old** path/URL in that table, run + [Sweep commands](#sweep-commands) on the **allowed** trees. Record + every hit as **List 1** (no `#`) or **List 2** (old path with `#...`) per + **Phase 0.5**. +3. **Phased edits:** **Phase 1** (`aliases`), then **Phase 2** (List 1), then + **Phase 3** (List 2) with **no guessing**—as in the sections below. +4. **Re-sweep** the same old-path set, then run **`docker buildx bake + validate`**. The **Definition of done** above is met or you have **explicit + defers** for the remainder. +5. **review-changes:** run **[review-changes](../review-changes/SKILL.md)** + on the branch vs **`main`…`HEAD`** (or the correct base) before a PR. + +### Sweep commands + +Use a **repository** search (e.g. `rg` / your IDE) so **nothing** in the +allowed scope is only eyeballed. + +**Trees to include** (at minimum): all of `content/`, plus **`data/`** and +**`layouts/`** when a migration can appear in config, `link:`-like fields, +shortcodes, or hardcoded path strings. Follow **Vendored / generated** rules in +**AGENTS.md**; do not edit disallowed files. + +**What to search for (repeat per row in the old side of the mapping):** + +- **Hugo / source form:** path segments that identify the *old* file, e.g. + `manuals/.../old-segment/...` or `../old-segment/.../page.md` as your tree + uses; include variants that still appear in the repo. +- **Published / site form:** e.g. `/admin/.../old-slug/` in front matter, nav + `url:`, or `https://docs.docker.com/...` in allowed files—**match the + file’s** established pattern, per **Conventions** below. +- **Anchors:** search for the **old path string**; matches that also include + `#...` belong on **List 2** for **Phase 3** unless the whole link is + a pure path-only case. + +[scripts/scope-pr-files.sh](scripts/scope-pr-files.sh) (if present) prints +**`PR_SCOPE_FILES` only**—it does **not** replace this sweep. Use it to build +the **old → new** table, **not** to list where inbound links were fixed. + +## Progressive disclosure (optional) + +The procedure below stays in this file. If a run produces a very large +**old → new** URL table, store that table in **`reference.md`** in this skill +directory and link it from the task summary, so the agent reads the long +mapping only when needed. + +## Modes + +- **PR-scoped (typical for a single PR)** + - **What the PR “owns” (focus):** use `git diff` / `base...HEAD` to know which + pages and renames the branch actually moves (`PR_SCOPE_FILES`). The **old → + new** mapping and **List 1 / List 2** for this migration are defined from + **that** work, not from unrelated areas. 
+ - **Where to look for stale references (sweep):** search broadly—typically all + of `content/` (and config, shortcodes, layouts, per Conventions)—for **inbound** + links and fields whose **target** is an **old** path or URL in **this** PR’s + mapping. Inbound stragglers are often in files the PR never touched; finding + them is **in scope** for this migration. + - **What to edit:** update **any** file in the allowed trees that contains a + **migration-relevant** reference (target ∈ this PR’s old path set) according + to the phases below. **Do not** treat `PR_SCOPE_FILES` as a hard limit on + *which files you may save* for **inbound** link repairs (unless + project policy for a given PR says otherwise; then follow policy and + **defer** out-of-PR file fixes). + - **Out of scope (defer / ignore in this run):** link and anchor problems that + are **not** about this PR’s old→new map—e.g. a different area’s own slug + issues, rot unrelated to the remapped path set. *Example:* a PR that only + remaps `content/strawberry/...` should not “fix the whole site”; it **should** + still fix a link under `mango/…` that **points at** an old `strawberry/…` path + in the mapping, and **should not** chase **mango/**-only issues that do + not involve those old targets. + +- **Full-site (complete migration after the PR)** + - Update stragglers **across the repo** (or all inbound links to moved + sections), including config-driven `link:` fields if policy allows. + - Still make **minimal** edits; no drive-by rewrites to **unrelated** targets + outside the run’s **declared** mapping and lists. + +### No guessing + +- The agent must **not** guess **replacement paths, published URLs, or fragment + IDs** (including for consolidated pages, renamed headings, or + “semantic” remaps of `#anchor` → new `#…`). If the user has not given an + explicit new target, **ask**, **defer**, or **stop** per **AGENTS.md**; never + infer, autocomplete, or substitute a plausible fragment from the target page’s + heading list. That rule applies in **every** phase, including after validation + in Phase 3. + +--- + +## Conventions (links, anchors, redirects) + +### Front matter `aliases` (redirects) + +- Per **COMPONENTS.md**, `aliases` are **URLs that redirect to this page**. +- Add or **merge** on the **new canonical** page; do not drop unrelated + entries. Match local examples: **published-style paths** (leading `/`), and + **trailing `/`** when that matches existing pages in the same area. +- **No** speculative redirects for URLs that were never published. +- **Collision check** before adding: no other page or redirect may already + own the same old path. +- If the site also uses **`data/redirects.yml`**, only add entries when + project policy requires it; avoid duplicating the same old URL in + `aliases` **and** `redirects.yml` unless maintainers do. + +### Internal links in Markdown (STYLE.md + COMPONENTS.md) + +- Use **relative paths to source files** (e.g. `../section/page.md`) with + **`.md`**, following **COMPONENTS.md** examples, unless the file already + uses an established pattern (e.g. some `link:` or nav fields use **published** + paths without `manuals` or `.md` — **match the surrounding file**). +- Keep **CLAUDE.md** / **AGENTS.md** rules: internal ref targets under + `content/manuals/...` often use the full **`/manuals/...`** path; published + URLs omit the `manuals` segment—do not confuse the two when fixing links. 
+- **Link text (STYLE.md):** descriptive, ~**5 words**; no “click here” or + “learn more”; **no** end punctuation **inside** the link text; **no** bold/italic + on link text unless normal in the sentence. +- **Headings (STYLE):** **sentence case**; do not rename headings in passing + unless the migration requires it (heading changes break fragments). + +### Shortcodes and layouts (links not only in Markdown) + +- **Phase 2–3 scope includes** any **shortcode or layout partial** (under + **Modes**, search broadly for inbound links to the migration; **edits** follow + the same file-level rules as for Markdown) that emits links: e.g. `ref` / + `relref`, `link` fields in shortcode args, or hardcoded + `docs.docker.com` / path strings. Grep for old paths, slugs, and fragments + under `layouts/shortcodes/` (and `layouts/_default/` if partials build nav). +- Match each file’s existing pattern; do not rewrite working shortcode style + just to “clean up.” + +### Fragments / anchors (Phase 3) + +- List 1 / List 2: fragment-bearing **cross-references to old paths** are tracked + on **List 2** in Phase 0.5; do not bulk-rewrite them in the **List 1** pass + (Phase 2). See Phase 0.5 and Phase 2. +- **Valid `#fragment` values:** after the user supplies a new fragment, it should + match the **target** page’s **generated** heading ID (Hugo slugification; see + **CLAUDE.md** / **AGENTS.md**). The agent still **validates** (see Phase 3) and + must **not** “pick” a different id from the page to replace a bad answer—**No + guessing**. +- Same-page: `[Text](#section-id)`. +- Cross-page: when user-provided, `#fragment` must still be checked against the + **target** file. Validate fragments in shortcodes the same way as in body + Markdown. + +### External URLs (**AGENTS.md**) + +- Do not commit **guessed** replacement URLs. If a URL cannot be verified, + treat as blocked or drop the fragment per AGENTS guidance. See also **No + guessing** above; internal and external link targets are treated the same for + inference: **none** without user input or a verified source. + +### Special cases (**AGENTS.md**) + +- **Engine API version** pages: respect coordinated **`/latest/` `aliases`** + rules—never leave two version files both owning `/latest/`. +- **Vendored / generated** trees: read-only; see CLAUDE.md. Do not “fix” links + there if policy forbids. + +--- + +## Phase 0 — Discovery (read-only; may use whole repo) + +1. Read **hugo.yaml** (permalinks, `refLinksErrorLevel`, `disablePathToLower`). +2. From the branch (diff, renames), build a **mapping table**: + - old source path → new source path + - old published URL → new published URL (from permalink rules) +3. **Case:** with `disablePathToLower: true`, filesystem path **case** appears in + URLs—**directory and link casing must match** (e.g. `setup` vs `Setup`). +4. When planning **inbound link** fixes, treat old-path references as two + categories: **no fragment** vs **with `#fragment`**. That split feeds + **List 1** and **List 2** in Phase 0.5 and drives Phase 2 ordering (see + there). + +--- + +## Phase 0.5 — PR-scoped evaluation (required before edits in PR mode) + +1. 
**Set `PR_SCOPE_FILES` (Git scope for PR mode)** + - When the PR **targets `main`**, use the same triple-dot form as + **review-changes**: + `git diff --name-only main...HEAD` + - For a **different target branch** or a custom base, use the merge base: + `BASE=$(git merge-base <target-branch> HEAD)` + then: + `git diff --name-only "$BASE"...HEAD` + - Those paths define **what moved** in the branch; they are the primary input + to the **old → new** path/URL table. They are **not** a hard cap on *where + to search* for **inbound** links (see **Modes**): sweeps for links **to** old + paths usually cover all of `content/` (and other trees per Conventions). + - If project policy **limits edits** to the diff for a given PR, follow that + and **defer** link fixes in files outside the diff; note the exception in + the task if the user relaxes that policy. + +2. Build checklists (see **Modes** for sweep vs area-of-work): + - path/URL mapping this run must honor (old source path → new; old published + → new, from the **PR’s** moves in PR-scoped mode, or the **declared** full + migration in full-site mode) + - **List 1 — old path, no fragment:** every **inbound** reference, found on + the **sweep** surface, to a moved **old** path that does **not** include a + `#...` fragment (e.g. `…/banana.md` in the repo’s link style for that + file). + - **List 2 — old path with fragment:** every **inbound** reference, found on + the same sweep, to a moved **old** path that **includes** a `#...` fragment + (e.g. `…/banana.md#anchor` or the published-style equivalent in context). The + **same** old path string may appear on **both** List 1 and List 2 for + different links; duplication across the two lists is OK. + - **Matching rules:** when recording List 1 / List 2, use **one** consistent + path representation for comparison (e.g. relative `../path/banana.md` vs + root-anchored) **per the conventions in this doc** and the **surrounding + file’s** established pattern. Agents compare and skip List 2 links in the + List 1 pass using the **same** representation rules. +3. **Out of scope** for the lists: only include references whose **old** target + is in this run’s **mapping**. Do not build List 1/2 for unrelated **mango/** + (or other) problems unless those links also target an **old** path that this + migration renames. Defer those issues separately (see **Modes**). + +--- + +## Phase 1 — `aliases` (old published URLs) + +1. On each **new** canonical page, add or merge **`aliases`** for every **real** + former public URL. +2. Do not strip existing unrelated aliases. +3. **PR-scoped:** add aliases only where the canonical file is in scope or the + project requires it; otherwise list missing alias targets for follow-up. + +--- + +## Phase 2 — In-repo link reference updates + +1. **List 1 first (path only):** update references that belong to **List 1** + (old path, **no** fragment). Replace old source paths or old published URLs + with the **new** targets; preserve each file’s link pattern (relative vs + root-anchored `.md` paths). **Do not** apply the same bulk path replacement to + links that appear in **List 2** (old path **with** `#...`) during this + sub-step—**leave** every **List 2** link **unchanged** for now. +2. **After List 1 is complete:** **re-scan** the **same** **sweep** surface as + in Phase 0.5 (e.g. all of `content/` plus config) or **print** a clear list of + all **remaining** **List 2** entries. Those links should still point at the + **old** path and **old** fragment until Phase 3. +3. 
**Full-site (extra sweep):** after steps 1–2, still use **AGENTS “Page + deletion checklist”**-style thoroughness for **config / front matter** + `link:` and similar so nav and grids are not left on old slugs. Apply the + **List 1 / List 2** rules there too: path-only old references first; defer + fragment-bearing rewrites in line with **List 2** until Phase 3. +4. **PR-scoped (which files to change):** apply List 1 and later Phase 3 updates + to **every** file the **sweep** finds with a **migration-relevant** reference + (inbound to an **old** path in the mapping), including files **not** in + `PR_SCOPE_FILES`, per **Modes**. **Log** and **defer** (do not “fix”) + unrelated stragglers. If policy forbids out-of-PR file edits, defer per step 1 + of Phase 0.5. +5. Include **shortcodes and layout partials** (see Conventions and **Modes** for + sweep vs focus). + +--- + +## Phase 3 — List 2: interactive path and fragment resolution + +**Prerequisites:** Phase 2 has updated **List 1**; **List 2** still lists **old +path + `#...`** (unchanged) for this migration. See **Modes** for which files +may be edited; **No guessing** applies. + +1. **Print List 2** to the user: every remaining **old path** + `#anchor` (in the + agreed representation), so nothing is hidden before the loop. +2. **For each distinct** `old-path#oldAnchor` (or process in the order the user + prefers, one at a time): + - Ask: **What is the new path (and fragment, if any) for this content?** The + user may give a new source path, published URL, and/or `#newAnchor` per + project conventions. + - **Validate** the user’s answer: open the **target** page (or resolve the + target) and check that `#newAnchor` (if any) **exists** as a real heading + / generated id on that page, per **CLAUDE.md** / **AGENTS.md** (same rules + as the rest of the site). **Do not** replace the user’s fragment with a + “better” one from the file. + - If validation **fails** (unknown target file, or `#newAnchor` not found on + the page): **warn** clearly (what failed: path vs missing fragment), then + **ask again** for a corrected path and/or fragment. **Repeat** until + validation passes or the user **defers** / **drops** the fragment (per + **AGENTS.md**). **Never** guess a new fragment to fix the problem. + - When validation **passes:** update **all** in-repo references that match + that **same** `old-path#oldAnchor` to the user-approved `new-path#newAnchor` + (respect each file’s link style; include shortcodes/layouts on the same + **sweep** surface as Phase 2). +3. **Repeat** from step 1: **re-print** or **re-scan** for **List 2** until it is + **empty** or the user defers the remainder. +4. **PR-scoped / full-site:** the **loop** is the same. **Edits** follow **Modes**: + migration-relevant **inbound** links may live in any file on the sweep; do + not expand into **unrelated** link debt from other areas. Defer as in **Modes** + and Phase 0.5. + +--- + +## Optional: scripts helper + +This skill includes a small **scope helper** so agents do not re-derive Git +recipes. See [scripts/scope-pr-files.sh](scripts/scope-pr-files.sh) — it prints +paths in PR scope for a given target branch (default `main`). 
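+
+A minimal usage sketch, assuming the run starts at the repository root and the
+PR targets `main` (pass a different branch name otherwise):
+
+```bash
+# Capture the PR scope once, then reuse it during Phase 0.5.
+PR_SCOPE_FILES=$(bash .agents/skills/migrate-content-ia/scripts/scope-pr-files.sh main)
+printf '%s\n' "$PR_SCOPE_FILES"
+```
+
+The helper only prints paths; building the old → new mapping from them is
+still the Phase 0.5 work described above.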
+ +--- + +## Verification + +```bash +docker buildx bake validate +``` + +Use the **Definition of done** in **Agent: required procedure (do not skip)** +as the final bar: **validate** must pass, and the **sweep** must be clean for +**plain** and **`#fragment`** old-path references, **or** the remainder must be +**explicitly deferred** in **Phase 3** per **AGENTS.md** / the user. Mid-run, +**Phase 2** may still leave **List 2** links unchanged **until** Phase 3; that +intermediate state is **not** the finished migration. diff --git a/.agents/skills/migrate-content-ia/scripts/scope-pr-files.sh b/.agents/skills/migrate-content-ia/scripts/scope-pr-files.sh new file mode 100644 index 00000000000..d027ee3e47a --- /dev/null +++ b/.agents/skills/migrate-content-ia/scripts/scope-pr-files.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# List files changed on the current branch since merge-base with the target +# branch — suitable for PR_SCOPE_FILES in PR-scoped migrate-content-ia runs. +# +# Usage: +# ./scope-pr-files.sh [target-branch] +# Default target-branch: main +# +# Example (Bash / Git Bash, from repo root): +# bash .agents/skills/migrate-content-ia/scripts/scope-pr-files.sh +# Example (other base): +# bash .../scope-pr-files.sh upstream/main +set -euo pipefail + +target="${1:-main}" + +if ! base=$(git merge-base "$target" HEAD 2>/dev/null); then + echo "Error: could not merge-base with '$target'. Fetch remotes or pass a valid branch." >&2 + exit 1 +fi + +git diff --name-only "$base"...HEAD diff --git a/.agents/skills/research/SKILL.md b/.agents/skills/research/SKILL.md new file mode 100644 index 00000000000..db9f98c3ffa --- /dev/null +++ b/.agents/skills/research/SKILL.md @@ -0,0 +1,93 @@ +--- +name: research +description: > + Research a documentation topic — locate affected files, understand the + problem, identify what to change. Use when investigating an issue, a + question, or a topic before writing a fix. Triggers on: "research issue + 1234", "investigate what needs changing for #500", "what files are + affected by #200", "where is X documented", "is our docs page about Y + accurate", "look into how we document Z". +--- + +# Research + +Thoroughly investigate the topic at hand and produce a clear plan for +the fix. The goal is to identify exact files, named targets within those +files, and the verified content needed for the fix. + +## 1. Gather context + +If the input is a GitHub issue number, fetch it: + +```bash +gh issue view <number> --repo docker/docs \ + --json number,title,body,labels,comments +``` + +Otherwise, work from what was provided — a description, a URL, a question, +or prior conversation context. Identify the topic, affected feature, or +page to investigate. + +## 2. Locate affected files + +Search `content/` using the URL or topic from the issue. Remember the +`/manuals` prefix mapping when converting URLs to file paths. + +For each candidate file, read the relevant section to confirm it contains +the reported problem. + +## 3. Check vendored ownership + +Before planning any edit, verify the file is editable locally: + +- `_vendor/` — read-only, vendored via Hugo modules +- `data/cli/` — read-only, generated from upstream YAML +- `content/reference/cli/` — read-only, generated from `data/cli/` +- Everything else in `content/` — editable + +If the fix requires upstream changes, identify the upstream repo and note +it as out of scope. See the vendored content table in CLAUDE.md. + +## 4. 
Find related content + +Look for pages that may need updating alongside the primary fix: + +- Pages that link to the affected content +- Include files (`content/includes/`) referenced by the page +- Related pages in the same section describing the same feature + +## 5. Verify facts + +If the issue makes a factual claim about how a feature behaves, verify it. +Follow external links, read upstream source, check release notes. Do not +plan a fix based on an unverified claim. + +If the fix requires a replacement URL and that URL cannot be verified (e.g. +network restrictions), report it as a blocker rather than guessing. + +## 6. Check the live site (if needed) + +For URL or rendering issues, fetch the live page: + +``` +https://docs.docker.com/<path>/ +``` + +## 7. Report findings + +Summarize what you found — files to change, the specific problem in each, +what the fix should be, and any constraints. This context feeds directly +into the write step. + +Be specific: name the file, the section or element within it, and the +verified content needed. "Fix the broken link in networking.md" is not +specific enough. "In `compose/networking.md`, the 'Custom networks' section, +remove the note about `driver_opts` being ignored — this was fixed in +Compose 2.24" is. + +## Notes + +- Research quality bounds write quality. Vague research produces broad + changes; precise research produces minimal ones. +- Do not create standalone research files — findings stay in conversation + context for the write step. diff --git a/.agents/skills/review-changes/SKILL.md b/.agents/skills/review-changes/SKILL.md new file mode 100644 index 00000000000..f147883a44b --- /dev/null +++ b/.agents/skills/review-changes/SKILL.md @@ -0,0 +1,107 @@ +--- +name: review-changes +description: > + Review uncommitted or recently committed documentation changes for + correctness, coherence, and style compliance. Use before creating a PR + to catch issues. "review my changes", "review the diff", "check the fix + before submitting", "does this look right". +context: fork +model: opus +--- + +# Review Changes + +Evaluate whether the changes correctly and completely solve the stated +problem, without introducing new issues. Start with no assumptions — the +change may contain mistakes. Your job is to catch what the writer missed, +not to rubber-stamp the diff. + +## 1. Identify what changed + +Determine the scope of changes to review: + +```bash +# Uncommitted changes +git diff --name-only + +# Last commit +git diff --name-only HEAD~1 + +# Entire branch vs main +git diff --name-only main...HEAD +``` + +Pick the right comparison for what's being reviewed. If reviewing a branch, +use `main...HEAD` to see all changes since the branch diverged. + +## 2. Read each changed file in full + +Do not just read the diff. For every changed file, read the entire file to +understand the full context the change lives in. A diff can look correct in +isolation but contradict something earlier on the same page. + +Then read the diff for the detailed changes: + +```bash +# Adjust the comparison to match step 1 +git diff --unified=10 # uncommitted +git diff --unified=10 HEAD~1 # last commit +git diff --unified=10 main...HEAD # branch +``` + +## 3. 
Follow cross-references + +For each changed file, check what links to it and what it links to: + +- Search for other pages that reference the changed content (grep for the + filename, heading anchors, or key phrases) +- Read linked pages to verify the change doesn't create contradictions + across pages +- Check that anchor links in cross-references still match heading IDs + +A change that's correct on its own page can break the story told by a +related page. + +## 4. Verify factual accuracy + +Don't assume the change is factually correct just because it reads well. + +- If the change describes how a feature behaves, verify against upstream + docs or source code +- If the change includes a URL, check that it resolves +- If the change references a CLI flag, option, or API field, confirm it + exists + +## 5. Evaluate as a reader + +Consider someone landing on this page from a search result, with no prior +context: + +- Does the page make sense on its own? +- Is the changed section clear without having read the issue or diff? +- Would a reader be confused by anything the change introduces or leaves + out? + +## 6. Review code and template changes + +For non-Markdown changes (JS, HTML, CSS, Hugo templates): + +- Trace through the common execution path +- Trace through at least one edge case (no stored preference, Alpine fails + to load, first visit vs returning visitor) +- Ask whether the change could produce unexpected browser or runtime + behavior that no automated tool would catch + +## 7. Decision + +**Approve** if the change is correct, coherent, complete, and factually +accurate. + +**Request changes** if: +- The change does not correctly solve the stated problem +- There is a factual error or contradiction (on-page or cross-page) +- A cross-reference is broken or misleading +- A reader would be confused + +When requesting changes, be specific: quote the exact text that is wrong, +explain why, and suggest the correct fix. diff --git a/.agents/skills/testcontainers-guides-migrator/SKILL.md b/.agents/skills/testcontainers-guides-migrator/SKILL.md new file mode 100644 index 00000000000..2be7e74daad --- /dev/null +++ b/.agents/skills/testcontainers-guides-migrator/SKILL.md @@ -0,0 +1,401 @@ +--- +name: testcontainers-guide-migrator +description: > + Migrate a Testcontainers guide from testcontainers.com into the Docker docs site (docs.docker.com). + Converts AsciiDoc to Hugo Markdown, updates code to the latest Testcontainers API, splits into + chapters with stepper navigation, verifies code compiles and tests pass, and validates against + Docker docs style rules. Use when asked to migrate a testcontainers guide, add a TC guide, or + port content from testcontainers.com to Docker docs. +--- + +# Migrate a Testcontainers Guide + +You are migrating guides from https://testcontainers.com/guides/ into the Docker docs Hugo site. +Each guide lives in its own GitHub repo under `testcontainers/tc-guide-*`, written in AsciiDoc. +The source repos are listed in the testcontainers-site build.sh: +https://github.com/testcontainers/testcontainers-site/blob/main/build.sh#L23-L45 + +## Inputs + +The user provides one or more guides to migrate. Resolve these from the inventory below: + +- **REPO_NAME**: GitHub repo (e.g. `tc-guide-getting-started-with-testcontainers-for-java`) +- **SLUG**: guide slug inside `guide/` dir (e.g. `getting-started-with-testcontainers-for-java`) +- **LANG**: language identifier (go, java, dotnet, nodejs, python) +- **GUIDE_ID**: short kebab-case name (e.g. 
`getting-started`) + +## Guide inventory + +These are the 21 guides from testcontainers.com/guides/ and their source repos: + +| # | Title | Repo | Lang | GUIDE_ID | +|---|-------|------|------|----------| +| 1 | Introduction to Testcontainers | tc-guide-introducing-testcontainers | (none) | introducing | +| 2 | Getting started for Java | tc-guide-getting-started-with-testcontainers-for-java | java | getting-started | +| 3 | Testing Spring Boot REST API | tc-guide-testing-spring-boot-rest-api | java | spring-boot-rest-api | +| 4 | Testcontainers lifecycle (JUnit 5) | tc-guide-testcontainers-lifecycle | java | lifecycle | +| 5 | Configuration of services in container | tc-guide-configuration-of-services-running-in-container | java | service-configuration | +| 6 | Replace H2 with real database | tc-guide-replace-h2-with-real-database-for-testing | java | replace-h2 | +| 7 | Testing ASP.NET Core web app | tc-guide-testing-aspnet-core | dotnet | aspnet-core | +| 8 | Testing Spring Boot Kafka Listener | tc-guide-testing-spring-boot-kafka-listener | java | spring-boot-kafka | +| 9 | REST API integrations with MockServer | tc-guide-testing-rest-api-integrations-using-mockserver | java | mockserver | +| 10 | Getting started for .NET | tc-guide-getting-started-with-testcontainers-for-dotnet | dotnet | getting-started | +| 11 | AWS integrations with LocalStack | tc-guide-testing-aws-service-integrations-using-localstack | java | aws-localstack | +| 12 | Testcontainers in Quarkus apps | tc-guide-testcontainers-in-quarkus-applications | java | quarkus | +| 13 | Getting started for Go | tc-guide-getting-started-with-testcontainers-for-go | go | getting-started | +| 14 | jOOQ and Flyway with Testcontainers | tc-guide-working-with-jooq-flyway-using-testcontainers | java | jooq-flyway | +| 15 | Getting started for Node.js | tc-guide-getting-started-with-testcontainers-for-nodejs | nodejs | getting-started | +| 16 | REST API integrations with WireMock | tc-guide-testing-rest-api-integrations-using-wiremock | java | wiremock | +| 17 | Local dev with Testcontainers Desktop | tc-guide-simple-local-development-with-testcontainers-desktop | java | local-dev-desktop | +| 18 | Micronaut REST API with WireMock | tc-guide-testing-rest-api-integrations-in-micronaut-apps-using-wiremock | java | micronaut-wiremock | +| 19 | Micronaut Kafka Listener | tc-guide-testing-micronaut-kafka-listener | java | micronaut-kafka | +| 20 | Getting started for Python | tc-guide-getting-started-with-testcontainers-for-python | python | getting-started | +| 21 | Keycloak with Spring Boot | tc-guide-securing-spring-boot-microservice-using-keycloak-and-testcontainers | java | keycloak-spring-boot | + +Already migrated: **#2 (Java getting-started)**, **#13 (Go getting-started)**, **#20 (Python getting-started)** + +## Step 0: Pre-flight + +1. Confirm `testing-with-docker` tag exists in `data/tags.yaml`. If not, add: + ```yaml + testing-with-docker: + title: Testing with Docker + ``` +2. Check if new terms need adding to `_vale/config/vocabularies/Docker/accept.txt`. +3. Read `STYLE.md` and `COMPONENTS.md` to refresh on Docker docs conventions. + +## Step 1: Clone the guide repo + +Clone the guide repo to a temporary directory. This gives you all source files locally — no HTTP calls needed. + +```bash +git clone --depth 1 https://github.com/testcontainers/{REPO_NAME}.git <tmpdir>/{REPO_NAME} +``` + +Where `<tmpdir>` is a temporary directory on your system (e.g. the output of `mktemp -d`). 
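+
+For example, cloning the Go getting-started guide (#13 in the inventory) could
+look like the following sketch; the repo name is illustrative, so substitute
+the resolved REPO_NAME:
+
+```bash
+# Illustrative values; adjust REPO_NAME to the guide being migrated.
+REPO_NAME=tc-guide-getting-started-with-testcontainers-for-go
+tmpdir=$(mktemp -d)
+git clone --depth 1 "https://github.com/testcontainers/${REPO_NAME}.git" "$tmpdir/$REPO_NAME"
+```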
+ +The repo structure is: +- `<tmpdir>/{REPO_NAME}/guide/{SLUG}/index.adoc` — the AsciiDoc guide source +- `<tmpdir>/{REPO_NAME}/src/` — application source code (referenced by `include::` directives) +- `<tmpdir>/{REPO_NAME}/testdata/` — test data files (SQL scripts, configs, etc.) +- `<tmpdir>/{REPO_NAME}/pom.xml` or `go.mod` — build config + +1. Read `guide/{SLUG}/index.adoc` to get the guide content. +2. Find all `include::{codebase}/path/to/file[]` directives. The `{codebase}` attribute points to a remote URL, but since you have the repo cloned, read the files directly from disk instead (e.g. `include::{codebase}/src/main/java/Foo.java[]` → read `<tmpdir>/{REPO_NAME}/src/main/java/Foo.java`). +3. If includes have `[lines="X..Y"]`, extract only those lines from the local file. +4. Note the `[source,lang]` block preceding each include — that determines the code fence language. + +This cloned repo also serves as the base for Step 6 (code verification) — you can run the tests directly in it to confirm they pass before updating the code to the latest API. + +## Step 2: Convert AsciiDoc to Markdown + +| AsciiDoc | Markdown | +|---|---| +| `== Heading` | `## Heading` | +| `=== Heading` | `### Heading` | +| `*bold*` (AsciiDoc bold) | `**bold**` | +| `https://url[Link text]` | `[Link text](url)` | +| `[source,lang]\n----\ncode\n----` | `` ```lang\ncode\n``` `` | +| `[source,shell]` with `$` prompts | `` ```console `` | +| `[NOTE]\ntext` or `====\n[NOTE]\n...\n====` | `> [!NOTE]\n> text` | +| `[TIP]\ntext` | `> [!TIP]\n> text` | +| `:toc:`, `:toclevels:`, `:codebase:` | Remove entirely | +| `include::{codebase}/path[]` | Replace with fetched code in a code fence | +| YAML front matter (date, draft, repo) | Remove; transform to Docker docs format | + +## Step 3: Apply Docker docs style rules + +These are mandatory (from STYLE.md and AGENTS.md): + +- **No "we"**: "We are going to create" → "Create" or "Start by creating" +- **No "let us" / "let's"**: → imperative voice or "You can..." +- **No hedge words**: remove "simply", "easily", "just", "seamlessly" +- **No meta-commentary**: remove "it's worth noting", "it's important to understand" +- **No "allows you to" / "enables you to"**: → "lets you" or rephrase +- **No "click"**: → "select" +- **No bold for emphasis or product names**: only bold UI elements +- **No time-relative language**: remove "currently", "new", "recently", "now" +- **No exclamations**: remove "Voila!!!" etc. +- Use `console` language hint for interactive shell blocks with `$` prompts +- Use contractions: "it's", "you're", "don't" + +## Step 4: Update code to latest Testcontainers API + +Research the latest API version for the target language before writing code. + +**Best practices reference**: The Testcontainers team maintains Claude skills with up-to-date API patterns and best practices for each language at https://github.com/testcontainers/claude-skills/ — check the relevant language skill (testcontainers-go, testcontainers-node, testcontainers-dotnet) for current API signatures, cleanup patterns, wait strategies, and anti-patterns to avoid. + +For each language, check the cloned repo's existing code, then update to the latest API. 
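+
+Before rewriting anything, it can help to confirm which Testcontainers version
+the guide currently pins. This is a sketch for a Go guide; other languages
+would check `pom.xml`, the `.csproj` file, `package.json`, or
+`requirements.txt` instead:
+
+```bash
+# Show the Testcontainers dependency the cloned guide currently uses.
+grep -n testcontainers "<tmpdir>/{REPO_NAME}/go.mod"
+```
+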
Key patterns per language: + +**Go** (testcontainers-go v0.41.0): +- `postgres.RunContainer(ctx, opts...)` → `postgres.Run(ctx, "image", opts...)` +- `testcontainers.WithImage(...)` → image is now the 2nd positional param to `Run()` +- Manual `WithWaitStrategy(wait.ForLog(...))` → `postgres.BasicWaitStrategies()` +- `t.Cleanup(func() { ctr.Terminate(ctx) })` → `testcontainers.CleanupContainer(t, ctr)` +- `if err != nil { log.Fatal(err) }` → `require.NoError(t, err)` (use testify require/assert) +- Helper functions should accept `t *testing.T` as first param, call `t.Helper()` +- No `TearDownSuite()` needed if `CleanupContainer` is registered in the helper +- Go version prerequisite: 1.25+ + +**Java** (testcontainers-java 2.0.4): +- Artifacts renamed in 2.x: `org.testcontainers:postgresql` → `org.testcontainers:testcontainers-postgresql` +- Check the latest version at https://java.testcontainers.org/ +- Use `@Testcontainers` and `@Container` annotations for JUnit 5 lifecycle +- Prefer module-specific containers (e.g. `PostgreSQLContainer`) over `GenericContainer` +- Use `@DynamicPropertySource` for Spring Boot integration + +**.NET** (testcontainers-dotnet): +- Check the latest NuGet package version +- Use `IAsyncLifetime` for container lifecycle in xUnit +- Use builder pattern: `new PostgreSqlBuilder().Build()` + +**Node.js** (testcontainers-node): +- Check the latest npm version +- Use module-specific packages (e.g. `@testcontainers/postgresql`) +- Use `GenericContainer` for services without a dedicated module + +**Python** (testcontainers-python): +- Check the latest PyPI version +- Use context managers (`with PostgresContainer() as postgres:`) +- Use module-specific containers when available + +For all languages: consult the corresponding Testcontainers skill at https://github.com/testcontainers/claude-skills/ for current best practices and anti-patterns. + +## Step 5: Create guide directory structure + +Directory: `content/guides/testcontainers-{LANG}-{GUIDE_ID}/` + +Each guide is its own top-level entry under `/guides/`. Do NOT nest guides inside a shared parent section — otherwise they won't appear individually in the tag/language filters on the guides listing page. + +### _index.md (landing page) + +```yaml +--- +title: {Full guide title} +linkTitle: {Short title for guides listing} +description: {One-line description} +keywords: testcontainers, {lang}, testing, {technologies used} +summary: | + {2-3 line summary for the guides listing card} +toc_min: 1 +toc_max: 2 +tags: [testing-with-docker] +languages: [{lang}] +params: + time: {estimated} minutes +--- + +<!-- Source: https://github.com/testcontainers/{REPO_NAME} --> +``` + +Content: what you'll learn (bulleted list), prerequisites, and a NOTE linking to `https://testcontainers.com/getting-started/` for newcomers. + +### Sub-pages (chapters) + +Split the guide into logical chapters. Each sub-page: + +```yaml +--- +title: {Chapter title} +linkTitle: {Short title for stepper} +description: {One-line description} +weight: {10, 20, 30, ...} +--- +``` + +**No `tags`, `languages`, or `params` on sub-pages** — only on `_index.md`. 
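+
+A scaffolding sketch that creates the typical chapter files shown in the
+breakdown below (directory and file names are illustrative; adapt them to the
+guide's actual chapters):
+
+```bash
+# Scaffold the guide directory and its typical chapter files.
+guide_dir="content/guides/testcontainers-{LANG}-{GUIDE_ID}"
+mkdir -p "$guide_dir"
+touch "$guide_dir"/{_index,create-project,write-tests,test-suites,run-tests}.md
+```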
+ +Typical chapter breakdown: +| Weight | File | Content | +|--------|------|---------| +| 10 | `create-project.md` | Project setup, dependencies, business logic | +| 20 | `write-tests.md` | First test using testcontainers | +| 30 | `test-suites.md` | Reusing containers, test helpers, suites | +| 40 | `run-tests.md` | Running tests, summary, further reading | + +Adapt the split to the guide's content — some guides may need fewer or more chapters. + +## Step 6: Verify code compiles and tests pass + +This is CRITICAL. The code in the guide MUST compile and all tests MUST pass. Do not skip this step. + +### 6a: Use the cloned repo as the verification project + +The repo you cloned in Step 1 (`<tmpdir>/{REPO_NAME}`) already contains a working project with all source files, build config, and tests. Use it as the starting point: + +```bash +cd <tmpdir>/{REPO_NAME} +``` + +First, verify the **original** code compiles and tests pass before you change anything. This confirms a good baseline. + +### 6b: Update the code in the cloned repo + +After confirming the original works, apply the API updates (from Step 4) directly in the cloned repo's source files. This is the same code you're putting in the guide — keep them in sync. + +### 6c: Update dependencies and compile + +Run compilation inside a container for reproducibility — no need to install the language toolchain on the host. Use the appropriate language Docker image, mounting the cloned repo: + +```bash +docker run --rm -v "<tmpdir>/{REPO_NAME}":/app -w /app <language-image> sh -c "<compile command>" +``` + +Pick the right image for the language (e.g. `golang:1.25-alpine`, `maven:3-eclipse-temurin-21`, `gradle:jdk21`, `mcr.microsoft.com/dotnet/sdk:9.0`, `node:22-alpine`, `python:3.13-alpine`). Update dependencies to the latest Testcontainers version and compile. + +If compilation fails, fix the code and update the guide markdown to match. + +### 6d: Run tests in a container with Docker socket mounted + +Run tests in the same kind of container, but **mount the Docker socket** so Testcontainers can create sibling containers. + +#### macOS Docker Desktop workarounds + +When running on macOS with Docker Desktop, these environment variables and flags are **required**: + +- **`TESTCONTAINERS_HOST_OVERRIDE=host.docker.internal`** — On macOS, containers can't reach sibling containers via the Docker bridge IP (`172.17.0.x`). This tells Testcontainers (including Ryuk) to connect via `host.docker.internal` instead. **Do NOT disable Ryuk** — it is a core Testcontainers feature and the guides must demonstrate proper usage. +- **`docker-java.properties`** with `api.version=1.47` — Docker Desktop's minimum API version is 1.44, but docker-java defaults to 1.24. Create this file in the project root and mount it to `/root/.docker-java.properties` inside Java containers. +- **`-Dspotless.check.skip=true`** — The Spotless Maven plugin in the source repos is incompatible with JDK 21. Skip it since it's a code formatter, not part of the test. +- **`-Dmicronaut.test.resources.enabled=false`** — Micronaut's Test Resources service starts a separate process that can't connect to Docker from inside a container. The guide tests use Testcontainers directly, not Test Resources. Only needed for Micronaut guides. 
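+
+Before running the per-language commands below, a quick check that a container
+can reach the host daemon through the mounted socket can save a wasted run.
+This is a sketch; `docker:cli` is used here only because it ships the Docker
+CLI with nothing else:
+
+```bash
+# If this prints both client and server versions, sibling containers can be created.
+docker run --rm \
+  -v /var/run/docker.sock:/var/run/docker.sock \
+  docker:cli docker version
+```
+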
+#### Java guide test command + +```bash +# Create docker-java.properties in the project root +echo "api.version=1.47" > <tmpdir>/{REPO_NAME}/docker-java.properties + +docker run --rm \ + -v "<tmpdir>/{REPO_NAME}":/app \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v "<tmpdir>/{REPO_NAME}/docker-java.properties":/root/.docker-java.properties \ + -e DOCKER_HOST=unix:///var/run/docker.sock \ + -e TESTCONTAINERS_HOST_OVERRIDE=host.docker.internal \ + -w /app \ + maven:3.9-eclipse-temurin-21 \ + mvn -B test -Dspotless.check.skip=true -Dspotless.apply.skip=true +``` + +For Quarkus guides, use `maven:3.9-eclipse-temurin-17` instead (Quarkus 3.22.3 compiles for Java 17). + +#### Go guide test command + +```bash +docker run --rm \ + -v "<tmpdir>/{REPO_NAME}":/app \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -e DOCKER_HOST=unix:///var/run/docker.sock \ + -e TESTCONTAINERS_HOST_OVERRIDE=host.docker.internal \ + -w /app \ + golang:1.25-alpine \ + sh -c "apk add --no-cache gcc musl-dev && go test -v -count=1 ./..." +``` + +#### Python guide test command + +```bash +docker run --rm \ + -v "<tmpdir>/{REPO_NAME}":/app \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -e DOCKER_HOST=unix:///var/run/docker.sock \ + -e TESTCONTAINERS_HOST_OVERRIDE=host.docker.internal \ + -w /app \ + python:3.13-slim \ + sh -c "pip install -r requirements.txt && python -m pytest" +``` + +#### .NET guide test command + +```bash +docker run --rm \ + -v "<tmpdir>/{REPO_NAME}":/app \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -e DOCKER_HOST=unix:///var/run/docker.sock \ + -e TESTCONTAINERS_HOST_OVERRIDE=host.docker.internal \ + -w /app \ + mcr.microsoft.com/dotnet/sdk:9.0 \ + dotnet test +``` + +#### Node.js guide test command + +```bash +docker run --rm \ + -v "<tmpdir>/{REPO_NAME}":/app \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -e DOCKER_HOST=unix:///var/run/docker.sock \ + -e TESTCONTAINERS_HOST_OVERRIDE=host.docker.internal \ + -w /app \ + node:22-alpine \ + sh -c "npm install && npm test" +``` + +#### Important: run tests sequentially + +Run guide tests **one at a time**. Running multiple concurrent DinD or sibling-container tests can overwhelm Docker Desktop's containerd store and cause `meta.db: input/output error` corruption, requiring a Docker Desktop restart. + +### 6e: Fix until green + +If any test fails, debug and fix the code in both the temporary project AND the guide markdown. Re-run until all tests pass. Do not proceed until verified. + +## Step 7: Update cross-references + +1. **`content/manuals/testcontainers.md`**: Add a bullet under the `## Guides` section: + ```markdown + - [Guide title](/guides/testcontainers-{LANG}-{GUIDE_ID}/) + ``` +2. **Do NOT update** `content/guides/testcontainers-cloud/_index.md` — keep its external links. +3. Link to `https://testcontainers.com/getting-started/` for the Testcontainers overview. +4. Use internal paths for already-migrated guides; keep `testcontainers.com` links for unmigrated ones. + +## Step 8: Validate + +**IMPORTANT**: Run ALL validation locally before committing. Vale checks run on CI and will block the PR if they fail — fixing after push wastes CI cycles and review time. + +1. `npx prettier --write content/guides/testcontainers-{LANG}-{GUIDE_ID}/` +2. `npx prettier --write content/manuals/testcontainers.md` +3. `docker buildx bake lint` — must pass with no errors +4. 
`docker buildx bake vale` — then check for errors in the new files: + ```bash + grep -A2 "testcontainers-{LANG}-{GUIDE_ID}" tmp/vale.out + ``` + Fix ALL errors before proceeding. Common issues: + - **Vale.Spelling**: tech terms (library names, tools) not in the dictionary → add to `_vale/config/vocabularies/Docker/accept.txt` (alphabetical order) + - **Vale.Terms**: wrong casing (e.g. "python" → "Python") → fix in the markdown. Watch for package names like `testcontainers-python` triggering false positives — rephrase to "Testcontainers for Python" in prose. + - **Docker.Avoid**: hedge words like "very", "simply" → reword + - **Docker.We**: first-person plural → rewrite to "you" or imperative + - Info-level suggestions (e.g. "VS Code" → "versus") are not blocking but review them + + Re-run `docker buildx bake vale` after fixes until no errors remain in the new files. +5. Verify in local dev server (`HUGO_PORT=1314 docker compose watch`): + - Guide appears when filtering by its language + - Guide appears when filtering by `Testing with Docker` tag + - Stepper navigation works across chapters + - All links resolve (no 404s) +6. Verify all external URLs return 200: + ```bash + curl -s -o /dev/null -w "%{http_code}" -L "{url}" + ``` + +## Step 9: Commit + +One commit per guide. Message format: +``` +feat(guides): add testcontainers {lang} {guide-id} guide + +Migrated from https://github.com/testcontainers/{REPO_NAME} +Updated to testcontainers-{lang} v{version} API. +``` + +## Special cases + +- **introducing-testcontainers**: Language-agnostic, conceptual. May overlap with `content/manuals/testcontainers.md`. Review for deduplication before migrating. +- **local-dev-testcontainers-desktop**: About Testcontainers Desktop (now part of Docker Desktop). May need significant rewriting rather than mechanical migration. +- **Java guides**: Many share the same language. Each still gets its own `testcontainers-java-{GUIDE_ID}` directory. + +## Reference: completed migration (Go getting-started) + +Use `content/guides/testcontainers-go-getting-started/` as the reference implementation: +- `_index.md` — landing page with frontmatter, prerequisites, learning objectives +- `create-project.md` (weight: 10) — project setup and business logic +- `write-tests.md` (weight: 20) — first test with testcontainers-go +- `test-suites.md` (weight: 30) — container reuse with testify suites +- `run-tests.md` (weight: 40) — running tests, summary, further reading diff --git a/.agents/skills/triage-issue/SKILL.md b/.agents/skills/triage-issue/SKILL.md new file mode 100644 index 00000000000..c4fcf19bb85 --- /dev/null +++ b/.agents/skills/triage-issue/SKILL.md @@ -0,0 +1,150 @@ +--- +name: triage-issue +description: > + Analyze a single GitHub issue for docker/docs — check whether the problem + still exists, determine a verdict, and report findings. Use when asked to + triage, assess, or review an issue, even if the user doesn't say "triage" + explicitly: "triage issue 1234", "is issue 500 still valid", "should we + close #200", "look at this issue", "what's going on with #200". +argument-hint: "<issue-number>" +context: fork +--- + +# Triage Issue + +Given GitHub issue **$ARGUMENTS** from docker/docs, figure out whether +it's still a real problem and say what should happen next. + +## 1. Fetch the issue + +```bash +gh issue view $ARGUMENTS --repo docker/docs \ + --json number,title,body,state,labels,createdAt,updatedAt,closedAt,assignees,author,comments +``` + +## 2. 
Understand the problem + +Read the issue body and all comments. Identify: + +- What is the reported problem? +- What content, URL, or file does it reference? +- Has anyone already proposed a fix or workaround in the comments? + +Check for linked PRs in the issue timeline, not only in the issue body or +comments: + +```bash +gh api repos/docker/docs/issues/$ARGUMENTS/timeline --paginate \ + --jq '.[] | select(.event=="cross-referenced" or .event=="connected" or .event=="referenced") | {event, created_at, source: .source.issue.html_url, title: .source.issue.title, state: .source.issue.state}' +``` + +If an open PR already addresses the issue, don't open another PR. Review the +existing PR instead, and report that the issue already has an associated PR. A +merged PR is strong evidence the issue is fixed. A closed-without-merge PR means +the issue is likely still open. + +## 3. Follow URLs + +Find all `docs.docker.com` URLs in the issue body and comments. For each: + +- Fetch the URL to check if it still exists (404 = content removed or moved) +- Check whether the content still contains the problem described +- Note when the page was last updated relative to when the issue was filed + +For non-docs URLs (GitHub links, external references), fetch them too if +they are central to understanding the issue. + +## 4. Check the repository + +If the issue references specific files, content sections, or code: + +- Find and read the current version of that content +- Check whether the problem has been fixed, content moved, or file removed +- Remember the `/manuals` prefix mapping when looking up files + +## 5. Check for upstream ownership + +If the issue is about content in `_vendor/` or `data/cli/`, it cannot be +fixed here. Identify which upstream repo owns it (see the vendored content +table in CLAUDE.md). + +## 6. Decide and act + +After investigating, pick one of these verdicts and take the corresponding +action on the issue: + +- **Close it** — the problem is already fixed, the content no longer exists, + or the issue is too outdated to be useful. Close the issue with a comment + explaining why: + + ```bash + gh issue close $ARGUMENTS --repo docker/docs \ + --comment "Closing: <one-sentence reason>" + ``` + +- **Fix it** — the problem is real and fixable in this repo. Name the + file(s) and what needs to change. Label the issue `status/confirmed` and + remove `status/triage` if present: + + ```bash + gh api repos/docker/docs/issues/$ARGUMENTS/labels \ + --method POST --field 'labels[]=status/confirmed' + gh api repos/docker/docs/issues/$ARGUMENTS/labels/status%2Ftriage \ + --method DELETE || true + ``` + +- **Escalate upstream** — the problem is real but lives in vendored content. + Name the upstream repo. Label the issue `status/upstream` and remove + `status/triage` if present: + + ```bash + gh api repos/docker/docs/issues/$ARGUMENTS/labels \ + --method POST --field 'labels[]=status/upstream' + gh api repos/docker/docs/issues/$ARGUMENTS/labels/status%2Ftriage \ + --method DELETE || true + ``` + +- **Leave it open** — you can't determine the current state, or the issue + needs human judgment. Label the issue `status/needs-analysis`: + + ```bash + gh api repos/docker/docs/issues/$ARGUMENTS/labels \ + --method POST --field 'labels[]=status/needs-analysis' + gh api repos/docker/docs/issues/$ARGUMENTS/labels/status%2Ftriage \ + --method DELETE || true + ``` + +Don't overthink the classification. An old issue isn't stale if the problem +still exists. 
An upstream issue is still valid — it's just not fixable here.
+
+Also apply the most relevant `area/` label based on the content affected.
+Available area labels: `area/accounts`, `area/admin`, `area/ai`,
+`area/api`, `area/billing`, `area/build`, `area/build-cloud`, `area/cli`,
+`area/compose`, `area/compose-spec`, `area/config`, `area/contrib`,
+`area/copilot`, `area/desktop`, `area/dhi`, `area/engine`,
+`area/enterprise`, `area/extensions`, `area/get-started`, `area/guides`,
+`area/hub`, `area/install`, `area/networking`, `area/offload`,
+`area/release-notes`, `area/samples`, `area/scout`, `area/security`,
+`area/storage`, `area/subscription`, `area/swarm`, `area/ux`. Pick one
+(or at most two if the issue clearly spans areas). Skip if none fit.
+
+```bash
+gh api repos/docker/docs/issues/$ARGUMENTS/labels \
+  --method POST --field 'labels[]=area/<name>'
+```
+
+## 7. Report
+
+Write a short summary: what the issue reports, what you found, and what
+should happen next. Reference the specific files, URLs, or PRs that support
+your conclusion. Skip metadata fields — the issue itself has the dates and
+labels. Mention the action you took (closed, labeled, etc.).
+
+## Notes
+
+- Always check timeline cross-references before deciding to fix an issue
+- Do not narrate your process — produce the final report
+- End every issue comment with an accurate agent-disclosure footer that names
+  the active coding agent, for example `Generated by Codex` or
+  `Generated by Claude Code`.
diff --git a/.agents/skills/write/SKILL.md b/.agents/skills/write/SKILL.md
new file mode 100644
index 00000000000..24f11e2b341
--- /dev/null
+++ b/.agents/skills/write/SKILL.md
@@ -0,0 +1,87 @@
+---
+name: write
+description: >
+  Write a documentation fix on a branch. Makes the minimal change, formats,
+  self-reviews, and commits. Use after research has identified what to change.
+  "write the fix", "make the changes", "implement the fix for #1234".
+hooks:
+  PostToolUse:
+    - matcher: "Edit|Write"
+      hooks:
+        - type: command
+          command: "bash ${CLAUDE_SKILL_DIR}/scripts/post-edit.sh"
+---
+
+# Write
+
+Make the minimal change that resolves the issue. Research has already
+identified what to change — this skill handles the edit, formatting,
+self-review, and commit.
+
+## 1. Create a branch
+
+```bash
+git checkout -b fix/issue-<number>-<short-desc> main
+```
+
+Use a short kebab-case description derived from the issue title (3-5 words).
+
+## 2. Read then edit
+
+Always read each file before modifying it. Make the minimal change that
+fixes the issue. Do not improve surrounding content, add comments, or
+address adjacent problems.
+
+Follow the writing guidelines in CLAUDE.md, STYLE.md, and COMPONENTS.md.
+
+## 3. Front matter check
+
+Every content page requires `title`, `description`, and `keywords` in its
+front matter. If any are missing from a file you touch, add them.
+
+## 4. Validate
+
+Prettier runs automatically after each edit via the PostToolUse hook.
+Run lint manually after all edits are complete:
+
+```bash
+${CLAUDE_SKILL_DIR}/scripts/lint.sh <changed-files>
+```
+
+The lint script runs markdownlint and vale on only the files you pass it,
+so the output is scoped to your changes. Fix any errors it reports.
+
+## 5. Self-review
+
+Re-read each changed file: right file, right lines, change is complete,
+front matter is present. Run `git diff` and verify only intended changes
+are present.
+
+## 6. 
Commit + +Stage only the changed files: + +```bash +git add <files> +git diff --cached --name-only # verify — no package-lock.json or other noise +git commit -m "$(cat <<'EOF' +docs: <short description under 72 chars> (fixes #NNNN) + +<What was wrong: one sentence citing the specific problem.> +<What was changed: one sentence describing the exact edit.> + +Co-Authored-By: Claude <noreply@anthropic.com> +EOF +)" +``` + +The commit body is mandatory. A reviewer reading only the commit should +understand the problem and the fix without opening the issue. + +## Notes + +- Never edit `_vendor/` or `data/cli/` — these are vendored +- If a file doesn't exist, check for renames: + `git log --all --full-history -- "**/filename.md"` +- If the fix requires a URL that cannot be verified, stop and report a + blocker rather than guessing diff --git a/.agents/skills/write/scripts/lint.sh b/.agents/skills/write/scripts/lint.sh new file mode 100755 index 00000000000..4ccb1fd2029 --- /dev/null +++ b/.agents/skills/write/scripts/lint.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Run markdownlint and vale on specific files. +# Usage: .agents/skills/write/scripts/lint.sh <file> [file...] +# +# Designed for agent workflows — scoped output, no repo-wide noise. +# For full repo validation, use: docker buildx bake validate +set -uo pipefail + +if [ $# -eq 0 ]; then + echo "Usage: $0 <file> [file...]" >&2 + exit 1 +fi + +exit_code=0 + +echo "=== markdownlint ===" +if ! npx markdownlint-cli "$@" 2>&1; then + exit_code=1 +fi + +echo "" +echo "=== vale ===" +if ! vale "$@" 2>&1; then + exit_code=1 +fi + +exit $exit_code diff --git a/.agents/skills/write/scripts/post-edit.sh b/.agents/skills/write/scripts/post-edit.sh new file mode 100755 index 00000000000..d5cb809554f --- /dev/null +++ b/.agents/skills/write/scripts/post-edit.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# PostToolUse hook for Edit/Write in the write skill. +# Auto-formats Markdown files with prettier after each edit. 
+set -euo pipefail + +input=$(cat) +file_path=$(echo "$input" | jq -r '.tool_input.file_path // empty') + +[ -z "$file_path" ] && exit 0 +[[ "$file_path" != *.md ]] && exit 0 + +npx prettier --write "$file_path" 2>/dev/null diff --git a/.claude b/.claude new file mode 120000 index 00000000000..c0ca4685663 --- /dev/null +++ b/.claude @@ -0,0 +1 @@ +.agents \ No newline at end of file diff --git a/.dockerignore b/.dockerignore index 24fffe8d3b3..ff3cf374b3b 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,6 +3,7 @@ .gitignore .idea .hugo_build.lock +hugo_stats.json _releaser CONTRIBUTING.md Dockerfile diff --git a/.gitattributes b/.gitattributes index 2929076bea1..5d685b7d139 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3,3 +3,11 @@ # Fine-tune GitHub's language detection content/**/*.md linguist-detectable + +# Mark generated and vendored content +# These files should not be edited directly in this repository + +# Vendored Hugo modules (from upstream repositories) +/_vendor/** linguist-generated=true +# Generated CLI reference data (vendored from upstream) +/data/cli/** linguist-generated=true diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5c0e55719ca..a7242821ed8 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -3,9 +3,9 @@ # For more details, see https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners -/content/manuals/build/ @crazy-max @ArthurFlag +/content/manuals/build/ @dvdksn -/content/manuals/build-cloud/ @crazy-max @craig-osterhout +/content/manuals/build-cloud/ @craig-osterhout /content/manuals/compose/ @aevesdocker @@ -19,28 +19,26 @@ /content/manuals/docker-hub/ @craig-osterhout -/content/manuals/engine/ @thaJeztah @ArthurFlag +/content/manuals/engine/ @dvdksn -/content/reference/api/engine/ @thaJeztah @ArthurFlag +/content/reference/api/engine/ @dvdksn -/content/reference/cli/ @thaJeztah @ArthurFlag +/content/reference/cli/ @dvdksn /content/manuals/subscription/ @sarahsanders-docker /content/manuals/security/ @aevesdocker @sarahsanders-docker -/content/manuals/trusted-content/ @craig-osterhout - -/content/manuals/docker-hub/official_images/ @craig-osterhout - -/content/manuals/registry/ @craig-osterhout - /content/manuals/admin/ @sarahsanders-docker /content/manuals/billing/ @sarahsanders-docker /content/manuals/accounts/ @sarahsanders-docker -/content/manuals/ai/ @ArthurFlag +/content/manuals/ai/ @dvdksn + +/_vendor @dvdksn + +/content/manuals/offload/ @craig-osterhout -/_vendor @sarahsanders-docker @ArthurFlag +/content/manuals/dhi/ @craig-osterhout diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 8762e0feb7a..33e46038c05 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -6,15 +6,9 @@ contact_links: - name: Moby url: https://github.com/moby/moby/issues about: Bug reports for Docker Engine - - name: Docker Desktop for Windows - url: https://github.com/docker/for-win/issues - about: Bug reports for Docker Desktop for Windows - - name: Docker Desktop for Mac - url: https://github.com/docker/for-mac/issues - about: Bug reports for Docker Desktop for Mac - - name: Docker Desktop for Linux - url: https://github.com/docker/for-linux/issues - about: Bug reports for Docker Desktop for Linux + - name: Docker Desktop + url: https://github.com/docker/desktop-feedback + about: Bug reports for Docker Desktop - name: Docker Compose url: https://github.com/docker/compose/issues about: Bug reports for 
Docker Compose diff --git a/.github/agents/docs-scanner.yaml b/.github/agents/docs-scanner.yaml new file mode 100644 index 00000000000..08731db9a61 --- /dev/null +++ b/.github/agents/docs-scanner.yaml @@ -0,0 +1,189 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/docker/docker-agent/refs/heads/main/cagent-schema.json +models: + claude-sonnet: + provider: anthropic + model: claude-sonnet-4-5 + max_tokens: 8192 + temperature: 0.3 + +agents: + root: + model: claude-sonnet + description: Daily documentation quality scanner for Docker docs + add_prompt_files: + - STYLE.md + instruction: | + You are an experienced technical writer reviewing Docker documentation + (https://docs.docker.com/) for substantive quality problems. The docs + are maintained in this repository under content/. Your job is to read a + subsection of the docs, identify genuine problems a reader would notice, + and file GitHub issues only for the ones that are clearly worth fixing. + + ## Setup + + 1. Read `.cache/scan-history.json` using `read_file`. + This file tracks every previously scanned directory as a JSON object: + ```json + { + "scanned": { + "content/manuals/desktop/networking/": "2026-02-24", + "content/manuals/build/cache/": "2026-02-23" + } + } + ``` + If the file does not exist or is empty, treat it as `{"scanned": {}}`. + + 2. Call `get_memories` to load any learned patterns from previous scans + (false positives to skip, codebase context, human feedback). + + 3. Use `list_directory` to explore `content/manuals/` and find all leaf + directories (no subdirectories). Skip these top-level paths entirely: + content/reference/, content/languages/, content/tags/, + content/includes/. + + 4. Pick a leaf directory to scan: + - FIRST CHOICE: a directory that does NOT appear in scan-history.json + - FALLBACK: if every leaf directory has been scanned, pick the one + with the OLDEST date in scan-history.json + + 5. Call `directory_tree` on the selected leaf and read ALL its files. + Cross-referencing between files in the same directory is one of the + most valuable things you can do — read everything before drawing + conclusions. + + 6. Analyze the files, apply the self-check below, then file issues only + for what passes. **File only what is genuinely worth fixing.** If you + find one strong issue, file one. If you find nothing substantive, file + zero. A run with zero issues is a success, not a failure. Do not + search for issues to fill a quota. + + 7. After scanning, update `.cache/scan-history.json` using `write_file`. + Read the current content, add or update the scanned path with today's + date (YYYY-MM-DD), and write the full updated JSON back. + + 8. If you learn anything useful for future scans (false positive patterns, + codebase context), call `add_memory` to store it. Do NOT use + `add_memory` for scan tracking — that is what scan-history.json is for. + + ## What good issues look like + + Ask yourself: would a reader following this documentation be confused or + misled? The bar is high. File issues only when the answer is clearly yes. 
+ + - **Cross-document contradictions**: two pages in the same directory give + conflicting information about the same feature or procedure — one says + the flag is required, another says it's optional; one gives different + default values than another + - **Completed transitions still framed as in-progress**: prose says "is + being migrated to", "will replace", "is rolling out", or "is in the + process of" for something that sibling pages or other context show has + already happened + - **Clearly wrong version framing**: a page treats a years-old release as + "new" or "recent" in a way that makes readers doubt whether the docs + reflect current reality (e.g. "Docker Engine 23.0 introduced this" where + 23.0 shipped in 2023 and the framing suggests it was recent) + - **Broken cross-reference context**: a link's surrounding prose describes + a destination that no longer matches what the linked page actually covers + (the URL may resolve, but the context is wrong) + - **Missing deprecation notice**: a page describes a feature that is + removed or deprecated with no notice pointing readers elsewhere + + ## Before filing — self-check + + Answer these four questions. If you can't answer 1 and 2 with yes, or if + 3 or 4 is yes, do not file the issue. + + 1. Can I quote the specific wrong text from the file? + 2. Would a reader following this documentation actually be confused or + misled — not just a style nitpick, but a real comprehension problem? + 3. Is this something already caught by automated tooling? + - Broken or missing links → htmltest catches these; do not file + - Time-relative words ("currently", "recently", "still", "yet", + "new") with no broader context problem → Vale catches these + - Formatting or style problems → markdownlint/Vale catch these + 4. Is this a legitimate product feature gate? "Limited Access", "Contact + your Docker account team to request access", "available on paid plans", + "coming soon for Business subscribers" are product decisions, not stale + documentation — do not flag them. + + ## What not to file + + - **Broken links** — htmltest catches these; do not file. This includes + links to non-existent files within the repo ("broken cross-reference" + is still a broken link). Only file if the URL resolves but the + surrounding prose misdescribes the destination. 
+ - **Single time-relative words** in otherwise accurate sentences — + "currently", "recently", "still", "yet", "usually" — Vale already flags + these; your job is to find problems Vale can't see + - **Feature gates and access tiers** — "Limited Access" badges, "Contact + sales", "request access" language — these are intentional product + decisions + - **Vague verification tasks** — "verify this diagram is up to date", + "check these links are still valid" — if you cannot identify the + specific problem from reading the file, don't file + - **Style and formatting** — Vale and markdownlint handle these + - **Suspicions without evidence** — you must quote the specific wrong text + + ## Filing issues + + Check for duplicates first: + ```bash + FILE_PATH="path/to/file.md" + gh issue list --repo docker/docs --label "agent/generated" --state open --search "in:body \"$FILE_PATH\"" + ``` + + Then create: + ```bash + ISSUE_TITLE="[docs-scanner] Brief description" + cat << 'EOF' | gh issue create \ + --repo docker/docs \ + --title "$ISSUE_TITLE" \ + --label "agent/generated" \ + --body-file - + **File:** `path/to/file.md` + + ### Issue + + What's wrong, with an exact quote from the file: + + > quoted text + + ### Why this matters + + Explain how a reader would be confused or misled by this. + + ### Suggested fix + + What should change, with specific alternative wording where possible. + + --- + *Found by nightly documentation quality scanner* + EOF + ``` + + ## Output + + ``` + SCAN COMPLETE + Subsection: content/manuals/desktop/features/ + Files checked: N + Issues created: N (or "0 — nothing substantive found") + - #123: [docs-scanner] Issue title + ``` + + toolsets: + - type: filesystem + tools: + - read_file + - read_multiple_files + - write_file + - list_directory + - directory_tree + - type: memory + path: .cache/scanner-memory.db + - type: shell + +permissions: + allow: + - shell:cmd=gh issue list --* + - shell:cmd=gh issue create --* diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b34db2bb401..5af531c4621 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,3 +5,9 @@ updates: directory: "/" schedule: interval: "daily" + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "weekly" + ignore: + - dependency-name: "*" diff --git a/.github/labeler.yml b/.github/labeler.yml index c610f1df620..24fd8554540 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -50,6 +50,11 @@ area/build-cloud: - any-glob-to-any-file: - content/manuals/build-cloud/** +area/offload: + - changed-files: + - any-glob-to-any-file: + - content/manuals/offload/** + area/compose: - changed-files: - any-glob-to-any-file: @@ -62,6 +67,11 @@ area/desktop: - any-glob-to-any-file: - content/manuals/desktop/** +area/dhi: + - changed-files: + - any-glob-to-any-file: + - content/manuals/dhi/** + area/engine: - changed-files: - any-glob-to-any-file: @@ -171,6 +181,11 @@ area/copilot: - any-glob-to-any-file: - content/manuals/copilot/** +ci: + - changed-files: + - any-glob-to-any-file: + - .github/workflows/** + hugo: - changed-files: - any-glob-to-any-file: @@ -179,7 +194,6 @@ hugo: - hugo_stats.json - i18n/** - layouts/** - - postcss.config.js - static/** - tailwind.config.js diff --git a/.github/prompts/freshness-tier1.prompt.md b/.github/prompts/freshness-tier1.prompt.md new file mode 100644 index 00000000000..41a784ccb23 --- /dev/null +++ b/.github/prompts/freshness-tier1.prompt.md @@ -0,0 +1,17 @@ +--- +mode: 'edit' +--- + +Imagine you're an experienced technical 
writer. You need to review content for
+how fresh and up to date it is. Apply the following:
+
+1. Fix spelling errors and typos
+2. Verify whether the markdown structure conforms to common markdown standards
+3. Ensure the content follows our [style guide](../instructions/styleguide-instructions.md).
+4. Make sure the titles on the page provide better context about the content (for an improved search experience).
+5. Ensure all the components are formatted correctly.
+6. Improve the SEO keywords.
+7. If you find numbered lists, make sure their numbering only uses 1's.
+8. Ensure each line is limited to 80 characters.
+
+Do your best and don't be lazy.
\ No newline at end of file
diff --git a/.github/prompts/freshness-tier2.prompt.md b/.github/prompts/freshness-tier2.prompt.md
new file mode 100644
index 00000000000..e6cd05cba72
--- /dev/null
+++ b/.github/prompts/freshness-tier2.prompt.md
@@ -0,0 +1,23 @@
+---
+mode: 'edit'
+---
+
+Imagine you're an experienced technical writer. You need to review content for
+how fresh and up to date it is. Apply the following:
+
+1. Improve the presentational layer: components, splitting the page into smaller pages.
+   Consider the following:
+
+   1. Can you use tabs to display multiple variants of the same steps?
+   2. Can you make a key item of information stand out with a call-out?
+   3. Can you reduce a large amount of text to a series of bullet points?
+   4. Are there other code components you could use?
+2. Check if any operating systems or package versions mentioned are still current and supported
+3. Check the accuracy of the content
+4. If appropriate, follow the document from start to finish to see if steps make sense in sequence
+5. Try to add some helpful next steps to the end of the document, but only if there is no *Next steps* or *Related pages* section already.
+6. Try to clarify, shorten, or improve the efficiency of some sentences.
+7. Check for LLM readability.
+8. Ensure each line is limited to 80 characters.
+
+Do your best and don't be lazy.
diff --git a/.github/prompts/review.prompt.md b/.github/prompts/review.prompt.md
new file mode 100644
index 00000000000..47a39e8e14c
--- /dev/null
+++ b/.github/prompts/review.prompt.md
@@ -0,0 +1,7 @@
+---
+mode: edit
+description: You are a technical writer reviewing an article for clarity, conciseness, and adherence to the documentation writing style guidelines.
+---
+Review the article for clarity, conciseness, and adherence to our documentation [style guidelines](../instructions/styleguide-instructions.md).
+
+Provide concrete and practical suggestions for improvement. 
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b62404c71e0..161fd2bdfff 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,20 +25,17 @@ jobs: steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4 with: version: ${{ env.SETUP_BUILDX_VERSION }} driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }} - name: Build - uses: docker/bake-action@v6 + uses: docker/bake-action@82490499d2e5613fcead7e128237ef0b0ea210f7 # v7 with: files: | docker-bake.hcl targets: releaser-build - set: | - *.cache-from=type=gha,scope=releaser - *.cache-to=type=gha,scope=releaser,mode=max build: runs-on: ubuntu-24.04 @@ -47,24 +44,21 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4 - name: Build - uses: docker/bake-action@v6 + uses: docker/bake-action@82490499d2e5613fcead7e128237ef0b0ea210f7 # v7 with: source: . files: | docker-bake.hcl targets: release - set: | - *.cache-from=type=gha,scope=build - *.cache-to=type=gha,scope=build,mode=max - name: Check Cloudfront config - uses: docker/bake-action@v6 + uses: docker/bake-action@82490499d2e5613fcead7e128237ef0b0ea210f7 # v7 with: source: . targets: aws-cloudfront-update @@ -74,17 +68,6 @@ jobs: AWS_CLOUDFRONT_ID: 0123456789ABCD AWS_LAMBDA_FUNCTION: DockerDocsRedirectFunction-dummy - vale: - if: ${{ github.event_name == 'pull_request' }} - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v4 - - uses: errata-ai/vale-action@reviewdog - env: - PIP_BREAK_SYSTEM_PACKAGES: 1 - with: - files: content - validate: runs-on: ubuntu-24.04 strategy: @@ -92,24 +75,35 @@ jobs: matrix: target: - lint + - vale - test - unused-media - test-go-redirects - dockerfile-lint - - path-warnings + - validate-vendor steps: + - + name: Checkout + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4 - name: Validate - uses: docker/bake-action@v6 + uses: docker/bake-action@82490499d2e5613fcead7e128237ef0b0ea210f7 # v7 with: + source: . files: | docker-bake.hcl targets: ${{ matrix.target }} - set: | - *.args.BUILDKIT_CONTEXT_KEEP_GIT_DIR=1 - *.cache-to=type=gha,scope=validate-${{ matrix.target }},mode=max - *.cache-from=type=gha,scope=validate-${{ matrix.target }} - *.cache-from=type=gha,scope=build + - + name: Install reviewdog + if: ${{ matrix.target == 'vale' && github.event_name == 'pull_request' }} + uses: reviewdog/action-setup@d8a7baabd7f3e8544ee4dbde3ee41d0011c3a93f # v1.5.0 + - + name: Run reviewdog for vale + if: ${{ matrix.target == 'vale' && github.event_name == 'pull_request' }} + run: | + cat ./tmp/vale.out | reviewdog -f=rdjsonl -name=vale -reporter=github-pr-annotations -fail-on-error=false -filter-mode=added -level=info -fail-level=warning + env: + REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 8ce0b6285e8..f704a0d5d9b 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -1,5 +1,5 @@ name: deploy - +# Deploys the Docker Docs website when merging to the `main` branch. 
concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true @@ -8,9 +8,8 @@ on: workflow_dispatch: push: branches: - - lab - main - - published + - lab env: # Use edge release of buildx (latest RC, fallback to latest stable) @@ -22,88 +21,54 @@ permissions: id-token: write contents: read +# The `main` branch is deployed to the production environment. +# The `lab` branch is deployed to a separate environment for testing purposes. jobs: publish: runs-on: ubuntu-24.04 if: github.repository_owner == 'docker' steps: - - - name: Prepare - run: | - HUGO_ENV=development - DOCS_AWS_REGION=us-east-1 - if [ "${{ github.ref }}" = "refs/heads/main" ]; then - HUGO_ENV=staging - DOCS_URL="https://docs-stage.docker.com" - DOCS_AWS_IAM_ROLE="arn:aws:iam::710015040892:role/stage-docs-docs.docker.com-20220818202135984800000001" - DOCS_S3_BUCKET="stage-docs-docs.docker.com" - DOCS_S3_CONFIG="s3-config.json" - DOCS_CLOUDFRONT_ID="E1R7CSW3F0X4H8" - DOCS_LAMBDA_FUNCTION_REDIRECTS="DockerDocsRedirectFunction-stage" - DOCS_SLACK_MSG="Successfully deployed docs-stage from main branch. $DOCS_URL" - elif [ "${{ github.ref }}" = "refs/heads/published" ]; then - HUGO_ENV=production - DOCS_URL="https://docs.docker.com" - DOCS_AWS_IAM_ROLE="arn:aws:iam::710015040892:role/prod-docs-docs.docker.com-20220818202218674300000001" - DOCS_S3_BUCKET="prod-docs-docs.docker.com" - DOCS_S3_CONFIG="s3-config.json" - DOCS_CLOUDFRONT_ID="E228TTN20HNU8F" - DOCS_LAMBDA_FUNCTION_REDIRECTS="DockerDocsRedirectFunction-prod" - DOCS_SLACK_MSG="Successfully deployed docs from published branch. $DOCS_URL" - elif [ "${{ github.ref }}" = "refs/heads/lab" ]; then - HUGO_ENV=lab - DOCS_URL="https://docs-labs.docker.com" - DOCS_AWS_IAM_ROLE="arn:aws:iam::710015040892:role/labs-docs-docs.docker.com-20220818202218402500000001" - DOCS_S3_BUCKET="labs-docs-docs.docker.com" - DOCS_S3_CONFIG="s3-config.json" - DOCS_CLOUDFRONT_ID="E1MYDYF65FW3HG" - DOCS_LAMBDA_FUNCTION_REDIRECTS="DockerDocsRedirectFunction-labs" - else - echo >&2 "ERROR: unknown branch ${{ github.ref }}" - exit 1 - fi - SEND_SLACK_MSG="true" - if [ -z "$DOCS_AWS_IAM_ROLE" ] || [ -z "$DOCS_S3_BUCKET" ] || [ -z "$DOCS_CLOUDFRONT_ID" ] || [ -z "$DOCS_SLACK_MSG" ]; then - SEND_SLACK_MSG="false" - fi - echo "BRANCH_NAME=${GITHUB_REF#refs/heads/}" >> $GITHUB_ENV - echo "HUGO_ENV=$HUGO_ENV" >> $GITHUB_ENV - echo "DOCS_URL=$DOCS_URL" >> $GITHUB_ENV - echo "DOCS_AWS_REGION=$DOCS_AWS_REGION" >> $GITHUB_ENV - echo "DOCS_AWS_IAM_ROLE=$DOCS_AWS_IAM_ROLE" >> $GITHUB_ENV - echo "DOCS_S3_BUCKET=$DOCS_S3_BUCKET" >> $GITHUB_ENV - echo "DOCS_S3_CONFIG=$DOCS_S3_CONFIG" >> $GITHUB_ENV - echo "DOCS_CLOUDFRONT_ID=$DOCS_CLOUDFRONT_ID" >> $GITHUB_ENV - echo "DOCS_LAMBDA_FUNCTION_REDIRECTS=$DOCS_LAMBDA_FUNCTION_REDIRECTS" >> $GITHUB_ENV - echo "DOCS_SLACK_MSG=$DOCS_SLACK_MSG" >> $GITHUB_ENV - echo "SEND_SLACK_MSG=$SEND_SLACK_MSG" >> $GITHUB_ENV - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 with: fetch-depth: 0 + - + name: Set environment variables + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 + env: + INPUT_GITHUB-REF: ${{ github.ref }} + with: + script: | + const fs = require('fs'); + const env = JSON.parse(fs.readFileSync('hack/releaser/env.json', 'utf8')); + const ref = core.getInput('github-ref'); + if (!env.hasOwnProperty(ref)) { + core.setFailed(`ERROR: unknown branch ${ref}`); + } + for (const [key, value] of Object.entries(env[ref])) { + core.exportVariable(key, value); + 
core.info(`${key}=${value}`); + } - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4 with: version: ${{ env.SETUP_BUILDX_VERSION }} driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }} - name: Build website - uses: docker/bake-action@v6 + uses: docker/bake-action@82490499d2e5613fcead7e128237ef0b0ea210f7 # v7 with: source: . files: | docker-bake.hcl targets: release - set: | - *.cache-from=type=gha,scope=deploy-${{ env.BRANCH_NAME }} - *.cache-to=type=gha,scope=deploy-${{ env.BRANCH_NAME }},mode=max provenance: false - name: Configure AWS Credentials if: ${{ env.DOCS_AWS_IAM_ROLE != '' }} - uses: aws-actions/configure-aws-credentials@v4 + uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5 with: role-to-assume: ${{ env.DOCS_AWS_IAM_ROLE }} aws-region: ${{ env.DOCS_AWS_REGION }} @@ -112,7 +77,6 @@ jobs: if: ${{ env.DOCS_S3_BUCKET != '' }} run: | aws --region ${{ env.DOCS_AWS_REGION }} s3 sync \ - --acl public-read \ --delete \ --exclude "*" \ --include "*.webp" \ @@ -121,29 +85,37 @@ jobs: --content-type="image/webp" \ public s3://${{ env.DOCS_S3_BUCKET }}/ aws --region ${{ env.DOCS_AWS_REGION }} s3 sync \ - --acl public-read \ --delete \ --exclude "*.webp" \ + --exclude "pagefind/*.pf_meta" \ + --exclude "pagefind/fragment/*.pf_fragment" \ public s3://${{ env.DOCS_S3_BUCKET }}/ - - name: Update S3 config - if: ${{ env.DOCS_S3_BUCKET != '' && env.DOCS_S3_CONFIG != '' }} - uses: docker/bake-action@v6 - with: - source: . - files: | - docker-bake.hcl - targets: aws-s3-update-config - set: | - *.cache-from=type=gha,scope=releaser - env: - AWS_REGION: ${{ env.DOCS_AWS_REGION }} - AWS_S3_BUCKET: ${{ env.DOCS_S3_BUCKET }} - AWS_S3_CONFIG: ${{ env.DOCS_S3_CONFIG }} + name: Upload pagefind files with compression headers + if: ${{ env.DOCS_S3_BUCKET != '' }} + run: | + aws --region ${{ env.DOCS_AWS_REGION }} s3 cp \ + --recursive \ + --content-encoding="gzip" \ + --content-type="application/octet-stream" \ + --metadata-directive="REPLACE" \ + public/pagefind/ s3://${{ env.DOCS_S3_BUCKET }}/pagefind/ \ + --exclude "*" \ + --include "*.pf_meta" \ + --include "*.pf_fragment" + - + name: Set markdown Content-Type on llms.txt + if: ${{ env.DOCS_S3_BUCKET != '' }} + run: | + aws --region ${{ env.DOCS_AWS_REGION }} s3 cp \ + s3://${{ env.DOCS_S3_BUCKET }}/llms.txt \ + s3://${{ env.DOCS_S3_BUCKET }}/llms.txt \ + --content-type="text/markdown" \ + --metadata-directive="REPLACE" - name: Update Cloudfront config if: ${{ env.DOCS_CLOUDFRONT_ID != '' }} - uses: docker/bake-action@v6 + uses: docker/bake-action@82490499d2e5613fcead7e128237ef0b0ea210f7 # v7 with: source: . 
files: | @@ -161,8 +133,3 @@ jobs: env: AWS_REGION: us-east-1 # cloudfront is only available in us-east-1 region AWS_MAX_ATTEMPTS: 5 - - - name: Send Slack notification - if: ${{ env.SEND_SLACK_MSG == 'true' }} - run: | - curl -X POST -H 'Content-type: application/json' --data '{"text":"${{ env.DOCS_SLACK_MSG }}"}' ${{ secrets.SLACK_WEBHOOK }} diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index bdaa6a0e52b..5dd213676ba 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -5,7 +5,7 @@ concurrency: cancel-in-progress: true on: - pull_request_target: + workflow_dispatch: jobs: labeler: @@ -16,4 +16,4 @@ jobs: steps: - name: Run - uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5.0.0 + uses: actions/labeler@634933edcd8ababfe52f92936142cc22ac488b1b # v6.0.1 diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml deleted file mode 100644 index 7b842d08e74..00000000000 --- a/.github/workflows/merge.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: merge - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -# open or update publishing PR when there is a push to main -on: - workflow_dispatch: - push: - branches: - - main - -jobs: - main-to-published: - runs-on: ubuntu-24.04 - if: github.repository_owner == 'docker' - steps: - - uses: actions/checkout@v4 - with: - ref: published - - name: Reset published branch - run: | - git fetch origin main:main - git reset --hard main - - name: Create Pull Request - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e - with: - delete-branch: false - branch: published-update - commit-message: publish updates from main - labels: area/release - title: publish updates from main - body: | - Automated pull request for publishing docs updates. 
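The deploy workflow's new "Set environment variables" step reads `hack/releaser/env.json` and exports one block of variables per branch ref, replacing the long inline shell case removed above. That file is not shown in this diff, so the following is only a hypothetical sketch of its expected shape; the keys and sample values mirror what the removed shell step used to export, and the real file may differ:

```json
{
  "refs/heads/main": {
    "HUGO_ENV": "staging",
    "DOCS_URL": "https://docs-stage.docker.com",
    "DOCS_AWS_REGION": "us-east-1",
    "DOCS_AWS_IAM_ROLE": "arn:aws:iam::<ACCOUNT_ID>:role/<STAGE_ROLE>",
    "DOCS_S3_BUCKET": "stage-docs-docs.docker.com",
    "DOCS_CLOUDFRONT_ID": "<DISTRIBUTION_ID>",
    "DOCS_LAMBDA_FUNCTION_REDIRECTS": "DockerDocsRedirectFunction-stage"
  },
  "refs/heads/lab": {
    "HUGO_ENV": "lab",
    "DOCS_URL": "https://docs-labs.docker.com",
    "DOCS_AWS_REGION": "us-east-1",
    "DOCS_AWS_IAM_ROLE": "arn:aws:iam::<ACCOUNT_ID>:role/<LABS_ROLE>",
    "DOCS_S3_BUCKET": "labs-docs-docs.docker.com",
    "DOCS_CLOUDFRONT_ID": "<DISTRIBUTION_ID>",
    "DOCS_LAMBDA_FUNCTION_REDIRECTS": "DockerDocsRedirectFunction-labs"
  }
}
```

The github-script step looks up `env[github.ref]`, calls `core.exportVariable` for each key, and fails the job for any ref without an entry, so the `on.push.branches` list and this file need to stay in sync.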
diff --git a/.github/workflows/nightly-docs-scan.yml b/.github/workflows/nightly-docs-scan.yml new file mode 100644 index 00000000000..9cb29884796 --- /dev/null +++ b/.github/workflows/nightly-docs-scan.yml @@ -0,0 +1,86 @@ +name: Nightly Documentation Scan + +on: + schedule: + # Run every day at 3am UTC + - cron: "0 3 * * *" + workflow_dispatch: + inputs: + dry-run: + description: "Report issues but do not create them" + type: boolean + default: false + +permissions: + contents: read + issues: write + +concurrency: + group: nightly-docs-scan + cancel-in-progress: false + +jobs: + scan: + runs-on: ubuntu-latest + timeout-minutes: 20 + permissions: + id-token: write + contents: read + issues: write + + steps: + - name: Checkout repository + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + with: + fetch-depth: 1 + + - name: Ensure cache directory exists + run: mkdir -p "${{ github.workspace }}/.cache" + + - name: Restore scanner state + uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 + with: + path: | + ${{ github.workspace }}/.cache/scanner-memory.db + ${{ github.workspace }}/.cache/scan-history.json + key: docs-scanner-state-${{ github.repository }}-${{ github.run_id }} + restore-keys: | + docs-scanner-state-${{ github.repository }}- + + - name: Configure AWS credentials + id: aws-credentials + continue-on-error: true + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4 + with: + role-to-assume: arn:aws:iam::710015040892:role/docker-agent-action-20260409141318957000000001 + aws-region: us-east-1 + + - name: Fetch bot PAT + if: steps.aws-credentials.outcome == 'success' + run: | + PAT=$(aws secretsmanager get-secret-value \ + --secret-id docker-agent-action/github-app \ + --query SecretString \ + --output text | jq -r '.pat') + echo "::add-mask::$PAT" + echo "GITHUB_APP_TOKEN=$PAT" >> "$GITHUB_ENV" + + - name: Run documentation scan + uses: docker/cagent-action@0498757af1c50b084f763d626f571918cf317509 # latest + env: + GH_TOKEN: ${{ env.GITHUB_APP_TOKEN || github.token }} + with: + agent: ${{ github.workspace }}/.github/agents/docs-scanner.yaml + prompt: "${{ inputs.dry-run == true && 'DRY RUN MODE: Do not create any GitHub issues. Report what you would create but skip the gh issue create commands.' || 'Run the nightly documentation scan as described in your instructions.' 
}}" + anthropic-api-key: ${{ secrets.ANTHROPIC_API_KEY }} + github-token: ${{ env.GITHUB_APP_TOKEN || github.token }} + timeout: 1200 + + - name: Save scanner state + uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 + if: always() + with: + path: | + ${{ github.workspace }}/.cache/scanner-memory.db + ${{ github.workspace }}/.cache/scan-history.json + key: docs-scanner-state-${{ github.repository }}-${{ github.run_id }} diff --git a/.github/workflows/notify-release-notes-pr.yml b/.github/workflows/notify-release-notes-pr.yml new file mode 100644 index 00000000000..0d271465c01 --- /dev/null +++ b/.github/workflows/notify-release-notes-pr.yml @@ -0,0 +1,45 @@ +name: Notify Slack on Desktop Release Notes PR + +on: + workflow_run: + workflows: ["Release Notes PR Trigger"] + types: [completed] + +jobs: + notify: + runs-on: ubuntu-24.04 + if: github.repository_owner == 'docker' && github.event.workflow_run.conclusion == 'success' + steps: + - name: Download PR details + uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8 + with: + name: pr-details + github-token: ${{ secrets.GITHUB_TOKEN }} + run-id: ${{ github.event.workflow_run.id }} + + - name: Read PR details + id: pr + run: | + echo "url=$(jq -r .url pr-details.json)" >> "$GITHUB_OUTPUT" + echo "title=$(jq -r .title pr-details.json)" >> "$GITHUB_OUTPUT" + echo "author=$(jq -r .author pr-details.json)" >> "$GITHUB_OUTPUT" + + - name: Notify Slack + uses: slackapi/slack-github-action@af78098f536edbc4de71162a307590698245be95 # v3.0.1 + with: + method: chat.postMessage + token: ${{ secrets.SLACK_BOT_TOKEN }} + payload: | + { + "channel": "${{ secrets.SLACK_RELEASE_CHANNEL_ID }}", + "text": "Desktop release notes", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":memo: *Desktop release notes*:\n<${{ steps.pr.outputs.url }}|${{ steps.pr.outputs.title }}> by <https://github.com/${{ steps.pr.outputs.author }}|${{ steps.pr.outputs.author }}>" + } + } + ] + } diff --git a/.github/workflows/pr-review-trigger.yml b/.github/workflows/pr-review-trigger.yml new file mode 100644 index 00000000000..c176020256e --- /dev/null +++ b/.github/workflows/pr-review-trigger.yml @@ -0,0 +1,33 @@ +name: PR Review - Trigger +on: + pull_request: + types: [ready_for_review, opened, review_requested] + pull_request_review_comment: + types: [created] + +permissions: {} + +jobs: + save-context: + runs-on: ubuntu-latest + steps: + - name: Save event context + env: + PR_NUMBER: ${{ github.event.pull_request.number }} + PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }} + COMMENT_JSON: ${{ toJSON(github.event.comment) }} + run: | + mkdir -p context + printf '%s' "${{ github.event_name }}" > context/event_name.txt + printf '%s' "$PR_NUMBER" > context/pr_number.txt + printf '%s' "$PR_HEAD_SHA" > context/pr_head_sha.txt + if [ "${{ github.event_name }}" = "pull_request_review_comment" ]; then + printf '%s' "$COMMENT_JSON" > context/comment.json + fi + + - name: Upload context + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 + with: + name: pr-review-context + path: context/ + retention-days: 1 diff --git a/.github/workflows/pr-review.yml b/.github/workflows/pr-review.yml new file mode 100644 index 00000000000..63603b77b44 --- /dev/null +++ b/.github/workflows/pr-review.yml @@ -0,0 +1,84 @@ +name: PR Review +on: + issue_comment: + types: [created] + workflow_run: + workflows: ["PR Review - Trigger"] + types: [completed] + +permissions: + contents: read # 
Required at top-level to give `issue_comment` events access to the secrets below. + +jobs: + review: + if: | + github.event_name == 'issue_comment' || + github.event.workflow_run.conclusion == 'success' + uses: docker/cagent-action/.github/workflows/review-pr.yml@c22076b8856ee12d9b4c4685bb49cf26eb974079 # v1.5.0 + # Scoped to the job so other jobs in this workflow aren't over-permissioned + permissions: + contents: read # Read repository files and PR diffs + pull-requests: write # Post review comments + issues: write # Create security incident issues if secrets detected + checks: write # (Optional) Show review progress as a check run + id-token: write # Required for OIDC authentication to AWS Secrets Manager + actions: read # Download artifacts from trigger workflow + with: + trigger-run-id: ${{ github.event_name == 'workflow_run' && format('{0}', github.event.workflow_run.id) || '' }} + add-prompt-files: STYLE.md,COMPONENTS.md + additional-prompt: | + ## Documentation Review Focus + + This is Docker's official documentation. + You are reviewing **DOCUMENTATION**, not code. Focus on documentation quality, not software bugs. + + **Style guides are available via prompt files (STYLE.md, COMPONENTS.md)** - reference them when evaluating changes. + + ## Priority Issues + + ### 1. Vendored/Generated Content (CRITICAL - Auto-reject) + Flag if changes touch: + - Any file in `_vendor/` directory (vendored from upstream repos) + - Any YAML file in `data/*/*.yaml` subdirectories (CLI reference data generated from upstream) + - Examples: `data/engine-cli/*.yaml`, `data/buildx/*.yaml`, `data/scout-cli/*.yaml` + - Exception: root-level data/ files are manually maintained (allow edits) + + ### 2. Missing Redirects When Removing/Moving Pages (HIGH) + When a PR removes or moves a page: + - Check if the PR adds an `aliases:` entry in the front matter of the target/replacement page + - Example: If `/old/path.md` is removed, there should be `aliases: ["/old/path/"]` in the new page + + ### 3. Markdown Formatting + - Poor markdown syntax (unclosed code blocks, broken lists, indentation issues, etc.) + - Line wrapping over 80 characters (except links, code blocks, tables) + + ### 4. AI-Generated Patterns (HIGH PRIORITY) + Flag AI-isms from STYLE.md: + - Hedge words: simply, just, easily, quickly, seamlessly + - Redundant phrases: "in order to", "allows you to" + - Meta-commentary: "it's worth noting that" + - Marketing speak: "robust", "powerful", "cutting-edge" + - Passive voice: "is used by" → "uses" + + ### 5. Scope Preservation + Does the change match existing document's length and tone? + Check STYLE.md "Scope preservation". + + ### 6. Content Accuracy + - Factually incorrect information (wrong commands, wrong API endpoints) + - Broken links or references + - Contradictory content + - Mismatched information (e.g., code example shows X but text says Y) + - Security issues in example code + + ### 7. Front Matter & Hugo Syntax + - Missing required fields: `title`, `description`, `keywords` + - Incorrect shortcode syntax (check COMPONENTS.md) + - Invalid component usage + + ## Severity + - **high**: Will mislead users or break things (incorrect commands, wrong APIs, security issues, editing vendored files, missing redirects) + - **medium**: Could confuse users or violates style guide (AI-isms, scope inflation, unclear instructions, markdown formatting) + - **low**: Minor suggestions (rarely report) + + Most issues should be MEDIUM. HIGH is for critical problems only. 
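Priority issue 2 in this reviewer prompt (missing redirects) corresponds to Hugo's `aliases` front matter, which also appears in the COMPONENTS.md reference added later in this diff. A minimal sketch of what a reviewer should expect to see in the replacement page when `/old/path.md` is removed; the paths and field values are placeholders:

```yaml
---
title: Replacement page title
description: Placeholder description for the page that absorbs the old content
keywords: placeholder, keywords
aliases:
  - /old/path/
---
```

Hugo then publishes a redirect at the old URL, so existing links and bookmarks continue to resolve.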
diff --git a/.github/workflows/release-notes-pr-trigger.yml b/.github/workflows/release-notes-pr-trigger.yml new file mode 100644 index 00000000000..1bd3b1c13b1 --- /dev/null +++ b/.github/workflows/release-notes-pr-trigger.yml @@ -0,0 +1,30 @@ +name: Release Notes PR Trigger + +on: + pull_request: + types: [opened] + paths: + - content/manuals/desktop/release-notes.md + +jobs: + trigger: + runs-on: ubuntu-24.04 + if: github.repository_owner == 'docker' + steps: + - name: Save PR details + env: + PR_URL: ${{ github.event.pull_request.html_url }} + PR_TITLE: ${{ github.event.pull_request.title }} + PR_AUTHOR: ${{ github.event.pull_request.user.login }} + run: | + jq -n \ + --arg url "$PR_URL" \ + --arg title "$PR_TITLE" \ + --arg author "$PR_AUTHOR" \ + '{url: $url, title: $title, author: $author}' > pr-details.json + + - name: Upload PR details + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7 + with: + name: pr-details + path: pr-details.json diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 00000000000..237de802db5 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,205 @@ +# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time. +# It also handles lifecycle slash commands for managing stale labels. +# For more information, see: https://github.com/actions/stale +# +# Security: Actions are pinned to full commit SHA to prevent supply chain attacks. +# To update, check releases and update both the SHA and version comment. +# +# Debug mode: +# - Lifecycle commands: Set DEBUG_ONLY to 'true' in the lifecycle-commands job env +# - Stale action: Set debug-only to true in the stale job configuration +name: Mark stale issues and pull requests + +on: + schedule: + - cron: '30 1 * * *' # Daily at 1:30 AM UTC + issue_comment: + types: [created] + +jobs: + lifecycle-commands: + runs-on: ubuntu-latest + if: github.event_name == 'issue_comment' + permissions: + issues: write + pull-requests: write + + steps: + - name: Handle lifecycle commands + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + env: + # Set to 'true' to test without making actual changes + DEBUG_ONLY: 'false' + with: + script: | + const comment = context.payload.comment.body.toLowerCase().trim(); + const debugOnly = process.env.DEBUG_ONLY === 'true'; + + if (debugOnly) { + console.log('🔍 DEBUG MODE: No changes will be made'); + } + + // Define commands and their required permissions + const commands = { + '/lifecycle frozen': { label: 'lifecycle/frozen', requiresWrite: true }, + '/lifecycle stale': { label: 'lifecycle/stale', requiresWrite: true }, + '/lifecycle active': { action: 'remove-stale', requiresWrite: false }, + '/remove-lifecycle frozen': { action: 'remove-frozen', requiresWrite: true }, + '/remove-lifecycle stale': { action: 'remove-stale', requiresWrite: false } + }; + + // Check if comment contains a lifecycle command + const commandKey = Object.keys(commands).find(cmd => + comment === cmd || comment.startsWith(cmd + ' ') + ); + + if (!commandKey) { + console.log('No lifecycle command found in comment'); + return; + } + + const commandConfig = commands[commandKey]; + const issue_number = context.issue.number; + + // Check user permissions for restricted commands + if (commandConfig.requiresWrite) { + if (debugOnly) { + console.log(`Would check permissions for user ${context.payload.comment.user.login} for ${commandKey}`); + } else { + try { + const { data: 
userPermission } = await github.rest.repos.getCollaboratorPermissionLevel({ + owner: context.repo.owner, + repo: context.repo.repo, + username: context.payload.comment.user.login + }); + + const hasWriteAccess = ['admin', 'write', 'maintain'].includes(userPermission.permission); + + if (!hasWriteAccess) { + console.log(`User ${context.payload.comment.user.login} does not have permission for ${commandKey}`); + await github.rest.reactions.createForIssueComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: context.payload.comment.id, + content: '-1' + }); + return; + } + console.log(`User ${context.payload.comment.user.login} has ${userPermission.permission} access`); + } catch (error) { + console.log('Error checking permissions:', error.message); + return; + } + } + } + + // Handle remove commands + if (commandConfig.action && commandConfig.action.startsWith('remove-')) { + const labelToRemove = commandConfig.action === 'remove-stale' ? 'lifecycle/stale' : 'lifecycle/frozen'; + + if (debugOnly) { + console.log(`Would remove ${labelToRemove} label from issue #${issue_number}`); + console.log('Would react with 👍 to comment'); + } else { + try { + await github.rest.issues.removeLabel({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issue_number, + name: labelToRemove + }); + console.log(`Removed ${labelToRemove} label`); + + await github.rest.reactions.createForIssueComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: context.payload.comment.id, + content: '+1' + }); + } catch (error) { + console.log(`Label ${labelToRemove} not found or already removed`); + } + } + } + // Handle add label commands + else if (commandConfig.label) { + if (debugOnly) { + console.log(`Would add ${commandConfig.label} label to issue #${issue_number}`); + console.log('Would react with 👍 to comment'); + } else { + try { + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issue_number, + labels: [commandConfig.label] + }); + console.log(`Added ${commandConfig.label} label`); + + await github.rest.reactions.createForIssueComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: context.payload.comment.id, + content: '+1' + }); + } catch (error) { + console.log(`Error adding label: ${error.message}`); + } + } + } + + stale: + runs-on: ubuntu-latest + if: github.event_name == 'schedule' + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0 + with: + ascending: false + operations-per-run: 30 + + # Exempt labels - issues/PRs with these labels will never be marked stale + exempt-issue-labels: 'kind/help-wanted,status/need-more-info,status/needs-analysis,lifecycle/frozen' + exempt-pr-labels: 'kind/help-wanted,status/need-more-info,status/needs-analysis,lifecycle/frozen' + + # Use lifecycle/stale label to match existing convention + stale-issue-label: 'lifecycle/stale' + stale-pr-label: 'lifecycle/stale' + + # Stale messages + stale-issue-message: | + There hasn't been any activity on this issue for a long time. If the problem is still relevant, **add a comment** to keep it open. Otherwise, this issue will be automatically closed in 14 days. + + **To remove the stale label:** Comment `/lifecycle active` + **To freeze (requires write access):** Comment `/lifecycle frozen` + stale-pr-message: | + Thanks for the PR. 
We'd like to make our product docs better, but haven't been able to review all the suggestions. As our docs change often and quickly diverge, we do not have the bandwidth to review and rebase old PRs. + + If the updates are still relevant, please rebase your PR against the latest version of the docs and **add a comment** when it's ready. This helps our maintainers focus on active contributions. If there's no activity, this PR will be closed in 30 days. + + **To remove the stale label:** Comment `/lifecycle active` + **To freeze (requires write access):** Comment `/lifecycle frozen` + + # Close messages + close-issue-message: | + Closing this issue as there hasn't been any activity for a long time. + + If the problem is still relevant, please **open a new issue** and complete the issue template so we can capture the details required to investigate further. This helps our maintainers focus on active issues. + close-pr-message: | + Closing this PR as there hasn't been any activity for a long time. + + If the updates are still relevant, please review our [contribution guidelines](https://github.com/docker/docs/blob/main/CONTRIBUTING.md) and **create a new PR** against the latest version of our docs. + + # Timing configuration NOTE: If you change days-before-issue-close or + # days-before-pr-close, also update the hardcoded values in the + # stale-issue-message and stale-pr-message above to match. + days-before-issue-stale: 180 # 6 months + days-before-pr-stale: 180 # 6 months + days-before-issue-close: 14 # 2 weeks after stale + days-before-pr-close: 30 # 1 month after stale + + # Debug mode - set to true for dry-run testing (no actual changes) + debug-only: false diff --git a/.github/workflows/sync-cli-docs.yml b/.github/workflows/sync-cli-docs.yml new file mode 100644 index 00000000000..382a0f86fad --- /dev/null +++ b/.github/workflows/sync-cli-docs.yml @@ -0,0 +1,131 @@ +name: sync-cli-docs + +on: + schedule: + # Run daily at 02:00 UTC + - cron: '0 2 * * *' + workflow_dispatch: + inputs: + version: + description: "(optional) Docker CLI version - defaults to docker_ce_version in hugo.yaml" + required: false + default: "" + pull_request: + paths: + - '.github/workflows/sync-cli-docs.yml' + - 'hack/sync-cli-docs.sh' + +permissions: + contents: write + pull-requests: write + +env: + BRANCH_NAME: "bot/sync-cli-docs" + +jobs: + sync-cli-docs: + runs-on: ubuntu-24.04 + steps: + - + name: Checkout docs repo + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + with: + fetch-depth: 0 + - + name: Get version from hugo.yaml + id: get-version + run: | + if [ -n "${{ inputs.version }}" ]; then + VERSION="${{ inputs.version }}" + else + VERSION=v$(grep "docker_ce_version:" hugo.yaml | awk '{print $2}' | tr -d '"') + fi + + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + echo "Docker CLI version: **$VERSION**" | tee -a "$GITHUB_STEP_SUMMARY" + - + name: Checkout docker/cli repo + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + with: + repository: docker/cli + path: cli-source + ref: ${{ steps.get-version.outputs.version }} + fetch-depth: 0 + - + name: Create update branch + id: create-branch + env: + BRANCH_NAME: ${{ env.BRANCH_NAME }} + run: | + git checkout -b "$BRANCH_NAME" + git config user.name "github-actions[bot]" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + - + name: Run sync script + id: sync + run: | + set +e + ./hack/sync-cli-docs.sh HEAD cli-source + EXIT_CODE=$? 
+ set -e + + if [ $EXIT_CODE -eq 0 ]; then + echo "changes=true" >> "$GITHUB_OUTPUT" + echo "Changes detected - syncing CLI docs" >> "$GITHUB_STEP_SUMMARY" + elif [ $EXIT_CODE -eq 100 ]; then + echo "changes=false" >> "$GITHUB_OUTPUT" + echo "No changes to sync - CLI docs are up to date" >> "$GITHUB_STEP_SUMMARY" + else + echo "::error::Script failed with exit code $EXIT_CODE" + exit $EXIT_CODE + fi + + - + name: Show PR + if: steps.sync.outputs.changes == 'true' + run: | + git show "${{ env.BRANCH_NAME }}" + - + name: Create or update Pull Request + if: steps.sync.outputs.changes == 'true' && github.event_name != 'pull_request' + env: + GH_TOKEN: ${{ github.token }} + BRANCH_NAME: ${{ env.BRANCH_NAME }} + PR_TITLE: "cli: sync docs with cli ${{ steps.get-version.outputs.version }}" + PR_BODY: | + ## Summary + + Automated sync of CLI documentation from docker/cli repository. + + **CLI Version:** ${{ steps.get-version.outputs.version }} + + --- + + > [!IMPORTANT] + > **Reviewer:** Please close and reopen this PR to trigger CI checks. + > See: https://docs.github.com/en/actions/how-tos/write-workflows/choose-when-workflows-run/trigger-a-workflow#triggering-a-workflow-from-a-workflow + run: | + # Check for existing open PR from this branch + EXISTING_PR=$(gh pr list --state open --head "$BRANCH_NAME" --json url --jq ".[0].url // empty") + + if [ -n "$EXISTING_PR" ]; then + echo "Updating existing PR: $EXISTING_PR" >> "$GITHUB_STEP_SUMMARY" + git push -u origin "$BRANCH_NAME" --force + gh pr edit "$EXISTING_PR" --title "$PR_TITLE" --body "$PR_BODY" + else + # Check if a closed PR with the same title already exists + CLOSED_PR=$(gh pr list --state closed --search "$PR_TITLE in:title" --json url --jq ".[0].url // empty") + if [ -n "$CLOSED_PR" ]; then + echo "A closed PR already exists for this version: $CLOSED_PR" >> "$GITHUB_STEP_SUMMARY" + echo "Skipping PR creation." + exit 0 + fi + + echo "Creating new PR" >> "$GITHUB_STEP_SUMMARY" + git push -u origin "$BRANCH_NAME" + gh pr create \ + --title "$PR_TITLE" \ + --body "$PR_BODY" \ + --base main \ + --head "$BRANCH_NAME" + fi diff --git a/.github/workflows/validate-upstream.yml b/.github/workflows/validate-upstream.yml index 0ac2645c76a..727b552ea43 100644 --- a/.github/workflows/validate-upstream.yml +++ b/.github/workflows/validate-upstream.yml @@ -34,12 +34,12 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 with: repository: docker/docs - name: Download data files - uses: actions/download-artifact@v4 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5 if: ${{ inputs.data-files-id != '' && inputs.data-files-folder != '' }} with: name: ${{ inputs.data-files-id }} @@ -51,7 +51,7 @@ jobs: # that folder. If not, create a placeholder stub file for the data file. 
name: Copy data files if: ${{ inputs.data-files-id != '' && inputs.data-files-folder != '' }} - uses: actions/github-script@v7 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 with: script: | const fs = require('fs'); @@ -61,7 +61,7 @@ jobs: const yamlSrcFilename = path.basename(yamlSrcPath); const yamlSrcNoExt = yamlSrcPath.replace(".yaml", ""); const hasSubCommands = (await (await glob.create(yamlSrcNoExt)).glob()).length > 1; - const yamlDestPath = path.join('data', `${{ inputs.data-files-folder }}`, yamlSrcFilename); + const yamlDestPath = path.join('data', 'cli', `${{ inputs.data-files-folder }}`, yamlSrcFilename); let placeholderPath = path.join("content/reference/cli", yamlSrcFilename.replace('_', '/').replace(/\.yaml$/, '.md')); if (hasSubCommands) { placeholderPath = placeholderPath.replace('.md', '/_index.md'); @@ -84,22 +84,19 @@ jobs: } - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4 with: version: ${{ env.SETUP_BUILDX_VERSION }} driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }} - name: Validate - uses: docker/bake-action@v6 + uses: docker/bake-action@82490499d2e5613fcead7e128237ef0b0ea210f7 # v7 with: source: . files: | docker-bake.hcl targets: validate-upstream provenance: false - set: | - *.cache-from=type=gha,scope=docs-upstream - *.cache-to=type=gha,scope=docs-upstream env: UPSTREAM_MODULE_NAME: ${{ inputs.module-name }} UPSTREAM_REPO: ${{ github.repository }} diff --git a/.gitignore b/.gitignore index 72f90137613..7c450914435 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,19 @@ +.hugo_build.lock +hugo_stats.json +.idea/ +.vscode/mcp.json +.vscode/settings.json +.vscode/tasks.json **/.DS_Store **/desktop.ini -.vscode node_modules -.hugo_build.lock -resources public -tmp +resources static/pagefind -.idea/ +tmp +# Binary installed by cagent-action in CI +cagent +# cagent tmp files +.cagent +.pr-body.md +.validation.log diff --git a/.htmltest.yml b/.htmltest.yml index e7cb321e1be..ea2330397f4 100644 --- a/.htmltest.yml +++ b/.htmltest.yml @@ -9,6 +9,8 @@ IgnoreDirectoryMissingTrailingSlash: true IgnoreURLs: - "^/reference/api/hub/.*$" - "^/reference/api/engine/v.+/#.*$" +- "^/reference/api/registry/.*$" +- "^/pagefind/.*$" IgnoreDirs: - "registry/configuration" - "compose/compose-file" # temporarily ignore until upstream is fixed diff --git a/.markdownlint.json b/.markdownlint.json index 58ab5995dd8..eecf08fddf6 100644 --- a/.markdownlint.json +++ b/.markdownlint.json @@ -13,12 +13,13 @@ "no-space-in-code": true, "no-space-in-links": true, "no-empty-links": true, - "ol-prefix": {"style": "ordered"}, + "ol-prefix": { "style": "one_or_ordered" }, "no-reversed-links": true, "reference-links-images": { "shortcut_syntax": false }, "fenced-code-language": true, "table-pipe-style": true, - "table-column-count": true + "table-column-count": true, + "descriptive-link-text": { "prohibited_texts": ["click here","here","link","more","learn more","find out more"]} } diff --git a/.vale-rdjsonl.tmpl b/.vale-rdjsonl.tmpl new file mode 100644 index 00000000000..662f973385c --- /dev/null +++ b/.vale-rdjsonl.tmpl @@ -0,0 +1,31 @@ +{{- /* Range over the linted files */ -}} + +{{- range .Files}} + +{{- $path := .Path -}} + +{{- /* Range over the file's alerts */ -}} + +{{- range .Alerts -}} + +{{- $error := "" -}} +{{- if eq .Severity "error" -}} + {{- $error = "ERROR" -}} +{{- else if eq .Severity "warning" -}} + {{- $error = "WARNING" -}} +{{- else -}} + {{- 
$error = "INFO" -}} +{{- end}} + +{{- /* Variables setup */ -}} + +{{- $line := printf "%d" .Line -}} +{{- $col := printf "%d" (index .Span 0) -}} +{{- $check := printf "%s" .Check -}} +{{- $message := printf "%s" .Message -}} + +{{- /* Output */ -}} + +{"message": "[{{ $check }}] {{ $message | jsonEscape }}", "location": {"path": "{{ $path }}", "range": {"start": {"line": {{ $line }}, "column": {{ $col }}}}}, "severity": "{{ $error }}"} +{{end -}} +{{end -}} diff --git a/.vale.ini b/.vale.ini index 710e13b2ff2..fb5939740ff 100644 --- a/.vale.ini +++ b/.vale.ini @@ -1,8 +1,38 @@ StylesPath = _vale MinAlertLevel = suggestion - +IgnoredScopes = text.frontmatter, code, tt, b, strong, i, a Vocab = Docker +# Disable rules for genered content +[content/reference/**/**.md] +Vale.Spelling = NO +Vale.Terms = NO +Docker.Capitalization = NO + +[content/manuals/*/release-notes/*.md] +Vale.Spelling = NO +Vale.Terms = NO +Docker.Capitalization = NO +Docker.We = NO + +[content/manuals/*/release-notes.md] +Vale.Spelling = NO +Vale.Terms = NO +Docker.Capitalization = NO +Docker.We = NO + +[content/contribute/*.md] +Vale.Spelling = NO +Vale.Terms = NO +Docker.Capitalization = NO +Docker.Exclamation = NO + +[content/manuals/desktop/previous-versions/*.md] +Vale.Spelling = NO +Vale.Terms = NO +Docker.Capitalization = NO +Docker.Exclamation = NO + [*.md] BasedOnStyles = Vale, Docker # Exclude `{{< ... >}}`, `{{% ... %}}`, [Who]({{< ... >}}) diff --git a/.vscode/docker.code-snippets b/.vscode/docker.code-snippets new file mode 100644 index 00000000000..1c2817d4183 --- /dev/null +++ b/.vscode/docker.code-snippets @@ -0,0 +1,67 @@ +{ + "Insert Hugo Note Admonition": { + "prefix": ["admonition", "note"], + "body": ["> [!NOTE]", "> $1"], + "description": "Insert a Hugo note admonition", + }, + "Insert Hugo Important Admonition": { + "prefix": ["admonition", "important"], + "body": ["> [!IMPORTANT]", "> $1"], + "description": "Insert a Hugo important admonition", + }, + "Insert Hugo Warning Admonition": { + "prefix": ["admonition", "warning"], + "body": ["> [!WARNING]", "> $1"], + "description": "Insert a Hugo warning admonition", + }, + "Insert Hugo Tip Admonition": { + "prefix": ["admonition", "tip"], + "body": ["> [!TIP]", "> $1"], + "description": "Insert a Hugo tip admonition", + }, + "Insert Hugo Tabs": { + "prefix": ["admonition", "tabs"], + "body": [ + "", + "{{< tabs group=\"$1\" >}}", + "{{< tab name=\"$2\">}}", + "", + "$3", + "", + "{{< /tab >}}", + "{{< tab name=\"$4\">}}", + "", + "$5", + "", + "{{< /tab >}}", + "{{</tabs >}}", + "", + ], + "description": "Insert a Hugo tabs block with two tabs and snippet stops for names and content", + }, + "Insert Hugo code block (no title)": { + "prefix": ["codeblock", "block"], + "body": ["```${1:json}", "$2", "```", ""], + "description": "Insert a Hugo code block with an optional title", + }, + "Insert Hugo code block (with title)": { + "prefix": ["codeblock", "codettl", "block"], + "body": ["```${1:json} {title=\"$2\"}", "$3", "```", ""], + "description": "Insert a Hugo code block with an optional title", + }, + "Insert a Button": { + "prefix": ["button"], + "body": ["{{< button url=\"$1\" text=\"$2\" >}}"], + "description": "Insert a Hugo button", + }, + "Insert Visual Studio Code": { + "prefix": ["vscode", "vs"], + "body": ["Visual Studio Code"], + "description": "Insert 'Visual Studio Code'", + }, + "Insert reusable snippet": { + "prefix": ["include","reuse"], + "body": ["{{% include \"$1\" %}}"], + "description": "Insert a reusable snippet stored in the 
`includes` folder", + } +} \ No newline at end of file diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000000..1fe05f7039f --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,226 @@ +# AGENTS.md + +Instructions for AI agents working on Docker documentation. +This site builds https://docs.docker.com/ using Hugo. + +## Project structure + +``` +content/ # Documentation source (Markdown + Hugo front matter) +├── manuals/ # Product docs (Engine, Desktop, Hub, etc.) +├── guides/ # Task-oriented guides +├── reference/ # API and CLI reference +└── includes/ # Reusable snippets +layouts/ # Hugo templates and shortcodes +data/ # YAML data files (CLI reference, etc.) +assets/ # CSS (Tailwind v4) and JS (Alpine.js) +static/ # Images, fonts +_vendor/ # Vendored Hugo modules (read-only) +``` + +## URL prefix stripping + +The `/manuals` prefix is stripped from published URLs: +`content/manuals/desktop/install.md` becomes `/desktop/install/` on the live +site. + +When writing internal cross-references in source files, keep the `/manuals/` +prefix in the path — Hugo requires the full source path. The stripping only +affects the published URL, not the internal link target. Anchor links must +exactly match the generated heading ID (Hugo lowercases and slugifies +headings). + +## Vendored content (do not edit) + +Content in `_vendor/` and CLI reference data in `data/cli/` are vendored +from upstream repos. Content pages under `content/reference/cli/` are +generated from `data/cli/` YAML. Do not edit any of these files — changes +must go to the source repository: + +| Content | Source repo | +|---------|-------------| +| CLI reference (`docker`, `docker build`, etc.) | docker/cli | +| Buildx reference | docker/buildx | +| Compose reference | docker/compose | +| Model Runner reference | docker/model-runner | +| Dockerfile reference | moby/buildkit | +| Engine API reference | moby/moby | + +If a validation failure or broken link traces back to vendored content, note +the upstream repo that needs fixing. Do not attempt to fix it locally. + +## Writing guidelines + +Read and follow [STYLE.md](STYLE.md) and [COMPONENTS.md](COMPONENTS.md). +These contain all style rules, shortcode syntax, and front matter requirements. + +### Style violations to avoid + +Every piece of writing must avoid these words and patterns (enforced by Vale): + +- Hedge words: "simply", "easily", "just", "seamlessly" +- Meta-commentary: "it's worth noting", "it's important to understand" +- "allows you to" or "enables you to" — use "lets you" or rephrase +- "we" — use "you" or "Docker" +- "click" — use "select" +- Bold for emphasis or product names — only bold UI elements +- Time-relative language: "currently", "new", "recently", "now" + +### Version-introduction notes + +Explicit version anchors ("Starting with Docker Desktop version X...") are +different from time-relative language — they mark when a feature was +introduced, which is permanently true. + +- Recent releases (~6 months): leave version callouts in place +- Old releases: consider removing if the callout adds little value +- When in doubt, keep the callout and flag for maintainer review + +### Vale gotchas + +- Use lowercase "config" in prose — `vale.Terms` flags a capital-C "Config" + +## Alpine.js patterns + +Do not combine Alpine's `x-show` with the HTML `hidden` attribute on the +same element. `x-show` toggles inline `display` styles, but `hidden` applies +`display: none` via the user-agent stylesheet — the element stays hidden +regardless of `x-show` state. 
Use `x-cloak` for pre-Alpine hiding instead. +The site defines `[x-cloak=""] { display: none !important }` in `global.css`. + +## Front matter requirements + +Every content page under `content/` requires: + +- `title:` — page title +- `description:` — short description for SEO/previews +- `keywords:` — list of search keywords (omitting this fails markdownlint) + +Additional common fields: + +- `linkTitle:` — sidebar label (keep under 30 chars) +- `weight:` — ordering within a section + +## Hugo shortcodes + +Shortcodes are defined in `layouts/shortcodes/`. Syntax reference is in +COMPONENTS.md. Wrong shortcode syntax fails silently during build but +produces broken HTML — always check COMPONENTS.md for correct syntax. + +## Commands + +```sh +npx prettier --write <file> # Format before committing +docker buildx bake validate # Run all validation checks +docker buildx bake lint # Markdown linting only +docker buildx bake vale # Style guide checks only +docker buildx bake test # HTML and link checking +``` + +### Validation in git worktrees + +`docker buildx bake validate` fails in git worktrees because Hugo cannot +resolve the worktree path. Use `lint` and `vale` targets separately instead. +Never modify `hugo.yaml` to work around this. The `test`, `path-warnings`, +and `validate-vendor` targets run correctly in CI. + +## Verification loop + +1. Make changes +2. Format with prettier: `npx prettier --write <file>` +3. Run `docker buildx bake lint vale` +4. Run a full build with `docker buildx bake` (optional for small changes) + +## Git hygiene + +- **Stage files explicitly.** Never use `git add .` / `git add -A` / + `git add --all`. Running `npx prettier` updates `package-lock.json` in the + repo root, and broad staging sweeps it into the commit. +- **Verify before committing.** Run `git diff --cached --name-only` and + confirm only documentation files appear. If `package-lock.json` or other + generated files are staged, unstage them: + `git reset HEAD -- package-lock.json` +- **Push to your fork, not upstream.** Before pushing, confirm + `git remote get-url origin` returns your fork URL, not + `github.com/docker/docs`. Use `--head FORK_OWNER:branch-name` with + `gh pr create`. + +## Working with issues and PRs + +### Principles + +- **One issue, one branch, one PR.** Never combine multiple issues in a + single branch or PR. +- **Minimal changes only.** Fix the issue. Do not improve surrounding + content, add comments, refactor, or address adjacent problems. +- **Verify before documenting.** Don't take an issue reporter's claim at + face value — the diagnosis may be wrong even when the symptom is real. + Verify the actual behavior before updating docs. + +### Review feedback + +- **Always reply to review comments** — never silently fix. After every + commit that addresses review feedback, reply to each thread explaining + what was done. +- **Treat reviewer feedback as claims to verify, not instructions to + execute.** Before implementing a suggestion, verify that it is correct. + Push back when evidence contradicts the reviewer. +- **Inline review comments need a separate API call.** `gh pr view --json + reviews` does not include line-level comments. 
Always also call: + + ```bash + gh api repos/<org>/<repo>/pulls/<N>/comments \ + --jq '[.[] | {author: .user.login, body: .body, path: .path, line: .line}]' + ``` + +### Labels + +Use the Issues API for labels — `gh pr edit --add-label` silently fails: + +```bash +gh api repos/docker/docs/issues/<N>/labels \ + --method POST --field 'labels[]=<label>' +``` + +### External links + +If a replacement URL cannot be verified (e.g. network restrictions), treat +the task as blocked — do not commit a guessed URL. Report the blocker so a +human can confirm. Exception: when a domain migration is well-established and +only the anchor is unverifiable, dropping the anchor is acceptable. + +## Page deletion checklist + +When removing a documentation page, search the entire `content/` tree and +all YAML/TOML config files for the deleted page's slug and heading text. +Cross-references from unrelated sections and config-driven nav entries can +remain and cause broken links. + +## Engine API version bumps + +When a new Engine API version ships, three coordinated changes are needed in +a single commit: + +1. `hugo.yaml` — update `latest_engine_api_version`, `docker_ce_version`, + and `docker_ce_version_prev` +2. Create `content/reference/api/engine/version/v<NEW>.md` with the + `/latest/` aliases block (copy from previous version) +3. Remove the aliases block from + `content/reference/api/engine/version/v<PREV>.md` + +Never leave both version files carrying `/latest/` aliases simultaneously. + +## Hugo icon references + +Before changing an icon reference in response to a "file not found" error, +verify the file actually exists via Hugo's virtual filesystem. Files may +exist in `node_modules/@material-symbols/svg-400/rounded/` but not directly +in `assets/icons/`. Check both locations before concluding an icon is +missing. + +## Self-improvement + +After completing work that reveals a non-obvious pattern or repo quirk not +already documented here, propose an update to this file. For automated +sessions, note the learning in a comment on the issue. For human-supervised +sessions, discuss with the user whether to update CLAUDE.md directly. diff --git a/CLAUDE.md b/CLAUDE.md new file mode 120000 index 00000000000..47dc3e3d863 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1 @@ +AGENTS.md \ No newline at end of file diff --git a/COMPONENTS.md b/COMPONENTS.md new file mode 100644 index 00000000000..69eeef37518 --- /dev/null +++ b/COMPONENTS.md @@ -0,0 +1,632 @@ +# Docker Documentation Components Guide + +This guide explains how to use components, shortcodes, and special features +when writing Docker documentation. For writing style and grammar, see +[STYLE.md](STYLE.md). + +## Front matter + +Every documentation page requires front matter at the top of the file. 
+ +### Required fields + +```yaml +--- +title: Install Docker Engine on Ubuntu +description: Instructions for installing Docker Engine on Ubuntu +keywords: requirements, apt, installation, ubuntu +--- +``` + +| Field | Description | +| ----------- | ------------------------------------------------------------ | +| title | Page title, used in `<h1>` and `<title>` tag | +| description | SEO description (150-160 characters), added to HTML metadata | +| keywords | Comma-separated keywords for SEO | + +### Optional fields + +| Field | Description | +| --------------- | ------------------------------------------------------------------ | +| linkTitle | Shorter title for navigation and sidebar (if different from title) | +| weight | Controls sort order in navigation (lower numbers appear first) | +| aliases | List of URLs that redirect to this page | +| toc_min | Minimum heading level in table of contents (default: 2) | +| toc_max | Maximum heading level in table of contents (default: 3) | +| notoc | Set to `true` to disable table of contents | +| sitemap | Set to `false` to exclude from search engine indexing | +| sidebar.badge | Add badge to sidebar (see [Badges](#badges)) | +| sidebar.reverse | Reverse sort order of pages in section | +| sidebar.goto | Override sidebar link URL | + +### Example with optional fields + +```yaml +--- +title: Install Docker Engine on Ubuntu +linkTitle: Install on Ubuntu +description: Instructions for installing Docker Engine on Ubuntu +keywords: requirements, apt, installation, ubuntu, install, uninstall +weight: 10 +aliases: + - /engine/installation/linux/ubuntu/ + - /install/linux/ubuntu/ +toc_max: 4 +params: + sidebar: + badge: + color: blue + text: Beta +--- +``` + +### Series (guide) pages + +Section pages under `content/guides/` automatically use the `series` layout +(via a Hugo cascade in `hugo.yaml`). Series pages support additional front +matter parameters for the metadata card: + +```yaml +--- +title: Getting started +description: Learn the basics of Docker +summary: | + A longer summary shown on the series landing page. +params: + proficiencyLevel: Beginner + time: 15 minutes + prerequisites: None +--- +``` + +| Field | Description | +| ---------------- | ---------------------------------------- | +| summary | Extended description for the series page | +| proficiencyLevel | Skill level (Beginner, Intermediate) | +| time | Estimated time to complete | +| prerequisites | Prerequisites or "None" | + +## Shortcodes + +Shortcodes are reusable components that add rich functionality to your +documentation. + +### Tabs + +Use tabs for platform-specific instructions or content variations. 
+ +**Example:** + +{{< tabs >}} +{{< tab name="Linux" >}} + +```console +$ docker run hello-world +``` + +{{< /tab >}} +{{< tab name="macOS" >}} + +```console +$ docker run hello-world +``` + +{{< /tab >}} +{{< tab name="Windows" >}} + +```powershell +docker run hello-world +``` + +{{< /tab >}} +{{< /tabs >}} + +**Syntax:** + +```markdown +{{</* tabs */>}} +{{</* tab name="Linux" */>}} +Content for Linux +{{</* /tab */>}} +{{</* tab name="macOS" */>}} +Content for macOS +{{</* /tab */>}} +{{</* /tabs */>}} +``` + +**Tab groups (synchronized selection):** + +Use the `group` attribute to synchronize tab selection across multiple tab +sections: + +```markdown +{{</* tabs group="os" */>}} +{{</* tab name="Linux" */>}} +Linux content for first section +{{</* /tab */>}} +{{</* tab name="macOS" */>}} +macOS content for first section +{{</* /tab */>}} +{{</* /tabs */>}} + +{{</* tabs group="os" */>}} +{{</* tab name="Linux" */>}} +Linux content for second section +{{</* /tab */>}} +{{</* tab name="macOS" */>}} +macOS content for second section +{{</* /tab */>}} +{{</* /tabs */>}} +``` + +When a user selects "Linux" in the first section, all tabs in the "os" group +switch to "Linux". + +### Accordion + +Use accordions for collapsible content like optional steps or advanced +configuration. + +**Example:** + +{{< accordion title="Advanced configuration" >}} + +```yaml +version: "3.8" +services: + web: + build: . + ports: + - "8000:8000" +``` + +{{< /accordion >}} + +**Syntax:** + +```markdown +{{</* accordion title="Advanced configuration" */>}} +Content inside the accordion +{{</* /accordion */>}} +``` + +### Include + +Reuse content across multiple pages using the include shortcode. Include +files must be in the `content/includes/` directory. + +**Syntax:** + +```markdown +{{</* include "filename.md" */>}} +``` + +**Example:** + +```markdown +{{</* include "install-prerequisites.md" */>}} +``` + +### Badges + +Use badges to highlight new, beta, experimental, or deprecated content. + +**Example:** + +{{< badge color=blue text="Beta" >}} +{{< badge color=violet text="Experimental" >}} +{{< badge color=green text="New" >}} +{{< badge color=gray text="Deprecated" >}} + +**Syntax:** + +Inline badge: + +```markdown +{{</* badge color=blue text="Beta" */>}} +``` + +Badge as link: + +```markdown +[{{</* badge color=blue text="Beta feature" */>}}](link-to-page.md) +``` + +Sidebar badge (in front matter): + +```yaml +--- +title: Page title +params: + sidebar: + badge: + color: violet + text: Experimental +--- +``` + +**Color options:** + +- `blue` - Beta content +- `violet` - Experimental or early access +- `green` - New GA content +- `amber` - Warning or attention +- `red` - Critical +- `gray` - Deprecated + +**Usage guidelines:** + +- Use badges for no longer than 2 months post-release +- Use violet for experimental/early access features +- Use blue for beta features +- Use green for new GA features +- Use gray for deprecated features + +### Summary bars + +Summary bars indicate subscription requirements, version requirements, or +administrator-only features at the top of a page. + +**Example:** + +{{< summary-bar feature_name="Docker Scout" >}} + +**Setup:** + +1. Add feature to `/data/summary.yaml`: + +```yaml +features: + Docker Scout: + subscription: Business + availability: GA + requires: "Docker Desktop 4.17 or later" +``` + +2. 
Add shortcode to page: + +```markdown +{{</* summary-bar feature_name="Docker Scout" */>}} +``` + +**Attributes in summary.yaml:** + +| Attribute | Description | Values | +| ------------ | ------------------------------------- | ------------------------------------------------- | +| subscription | Subscription tier required | All, Personal, Pro, Team, Business | +| availability | Product development stage | Experimental, Beta, Early Access, GA, Retired | +| requires | Minimum version requirement | String describing version (link to release notes) | +| for | Indicates administrator-only features | Administrators | + +### Buttons + +Create styled buttons for calls to action. + +**Syntax:** + +```markdown +{{</* button text="Download Docker Desktop" url="/get-docker/" */>}} +``` + +### Cards + +Create card layouts for organizing content. + +**Syntax:** + +```markdown +{{</* card + title="Get started" + description="Learn Docker basics" + link="/get-started/" +*/>}} +``` + +### Icons + +Use Material Symbols icons in your content. + +**Syntax:** + +```markdown +{{</* icon name="check_circle" */>}} +``` + +Browse available icons at +[Material Symbols](https://fonts.google.com/icons). + +## Callouts + +Use GitHub-style callouts to emphasize important information. Use sparingly - +only when information truly deserves special emphasis. + +**Syntax:** + +```markdown +> [!NOTE] +> This is a note with helpful context. + +> [!TIP] +> This is a helpful suggestion or best practice. + +> [!IMPORTANT] +> This is critical information users must understand. + +> [!WARNING] +> This warns about potential issues or consequences. + +> [!CAUTION] +> This is for dangerous operations requiring extreme care. +``` + +**When to use each type:** + +| Type | Use for | Color | +| --------- | -------------------------------------------------------------- | ------ | +| Note | Information that may not apply to all readers or is tangential | Blue | +| Tip | Helpful suggestions or best practices | Green | +| Important | Issues of moderate magnitude | Yellow | +| Warning | Actions that may cause damage or data loss | Red | +| Caution | Serious risks | Red | + +## Code blocks + +Format code with syntax highlighting using triple backticks and language +hints. + +### Language hints + +Common language hints: + +- `console` - Interactive shell with `$` prompt +- `bash` - Bash scripts +- `dockerfile` - Dockerfiles +- `yaml` - YAML files +- `json` - JSON data +- `go`, `python`, `javascript` - Programming languages +- `powershell` - PowerShell commands + +**Interactive shell example:** + +````markdown +```console +$ docker run hello-world +``` +```` + +**Bash script example:** + +````markdown +```bash +#!/usr/bin/bash +echo "Hello from Docker" +``` +```` + +### Variables in code + +Use `<VARIABLE_NAME>` format for placeholder values: + +````markdown +```console +$ docker tag <IMAGE_NAME> <REGISTRY>/<IMAGE_NAME>:<TAG> +``` +```` + +This syntax renders variables in a special color and font style. + +### Highlighting lines + +Highlight specific lines with `hl_lines`: + +````markdown +```go {hl_lines=[3,4]} +func main() { + fmt.Println("Hello") + fmt.Println("This line is highlighted") + fmt.Println("This line is highlighted") +} +``` +```` + +### Collapsible code blocks + +Make long code blocks collapsible: + +````markdown +```dockerfile {collapse=true} +# syntax=docker/dockerfile:1 +FROM golang:1.21-alpine +RUN apk add --no-cache git +# ... 
more lines +``` +```` + +## Images + +Add images using standard Markdown syntax with optional query parameters for +sizing and borders. + +**Basic syntax:** + +```markdown +![Alt text description](/assets/images/image.png) +``` + +**With size parameters:** + +```markdown +![Alt text](/assets/images/image.png?w=600&h=400) +``` + +**With border:** + +```markdown +![Alt text](/assets/images/image.png?border=true) +``` + +**Combined parameters:** + +```markdown +![Alt text](/assets/images/image.png?w=600&h=400&border=true) +``` + +**Best practices:** + +- Store images in `/static/assets/images/` +- Use descriptive alt text for accessibility +- Compress images before adding to repository +- Capture only relevant UI, avoid unnecessary whitespace +- Use realistic text, not lorem ipsum +- Remove unused images from repository + +## Videos + +Embed videos using HTML video tags or platform-specific embeds. + +**Local video:** + +```html +<video controls width="100%"> + <source src="/assets/videos/demo.mp4" type="video/mp4" /> +</video> +``` + +**YouTube embed:** + +```html +<iframe + width="560" + height="315" + src="https://www.youtube.com/embed/VIDEO_ID" + frameborder="0" + allow="autoplay; encrypted-media" + allowfullscreen +> +</iframe> +``` + +## Links + +Use standard Markdown link syntax. For internal links, use relative paths +with source filenames. + +**External link:** + +```markdown +[Docker Hub](https://hub.docker.com/) +``` + +**Internal link (same section):** + +```markdown +[Installation guide](install.md) +``` + +**Internal link (different section):** + +```markdown +[Get started](/get-started/overview.md) +``` + +**Link to heading:** + +```markdown +[Prerequisites](#prerequisites) +``` + +**Best practices:** + +- Use descriptive link text (around 5 words) +- Front-load important terms +- Avoid generic text like "click here" or "learn more" +- Don't include end punctuation in link text +- Use relative paths for internal links + +## Lists + +### Unordered lists + +```markdown +- First item +- Second item +- Third item + - Nested item + - Another nested item +``` + +### Ordered lists + +```markdown +1. First step +2. Second step +3. Third step + 1. Nested step + 2. Another nested step +``` + +### Best practices + +- Limit bulleted lists to 5 items when possible +- Don't add commas or semicolons to list item ends +- Capitalize list items for ease of scanning +- Make all list items parallel in structure +- For nested sequential lists, use lowercase letters (a, b, c) + +## Tables + +Use standard Markdown table syntax with sentence case for headings. + +**Example:** + +```markdown +| Feature | Description | Availability | +| -------------- | ------------------------------- | ------------ | +| Docker Compose | Multi-container orchestration | All | +| Docker Scout | Security vulnerability scanning | Business | +| Build Cloud | Remote build service | Pro, Team | +``` + +**Best practices:** + +- Use sentence case for table headings +- Don't leave cells empty - use "N/A" or "None" +- Align decimals on the decimal point +- Keep tables scannable - avoid dense content + +## Quick reference + +### File structure + +```plaintext +content/ +├── manuals/ # Product documentation +│ ├── docker-desktop/ +│ ├── docker-hub/ +│ └── ... 
+├── guides/ # Learning guides +├── reference/ # API and CLI reference +└── includes/ # Reusable content snippets +``` + +### Common patterns + +**Platform-specific instructions:** + +Use tabs with consistent names: Linux, macOS, Windows + +**Optional content:** + +Use accordions for advanced or optional information + +**Version/subscription indicators:** + +Use badges or summary bars + +**Important warnings:** + +Use callouts (NOTE, WARNING, CAUTION) + +**Code examples:** + +Use `console` for interactive shells, appropriate language hints for code diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2ce05f85a89..22766bb85dd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,11 +3,14 @@ We value documentation contributions from the Docker community. We'd like to make it as easy as possible for you to work in this repository. -Our style guide and instructions on using our page templates and components is -available in the [contribution section](https://docs.docker.com/contribute/) on -the website. +Before contributing, review our documentation standards: -The following guidelines describe the ways in which you can contribute to the +- [STYLE.md](STYLE.md) - Writing style, voice, grammar, and formatting + guidelines +- [COMPONENTS.md](COMPONENTS.md) - How to use Hugo shortcodes, front matter, + and components + +The following guidelines describe how to contribute to the Docker documentation at <https://docs.docker.com/>, and how to get started. ## Reporting issues @@ -91,6 +94,9 @@ To stop the development server: 1. In your terminal, press `<Ctrl+C>` to exit the file watch mode of Compose. 2. Stop the Compose service with the `docker compose down` command. +> [!NOTE] +> Alternatively, if you have installed Hugo, you can build with `hugo serve`. + ### Testing Before you push your changes and open a pull request, we recommend that you @@ -105,13 +111,17 @@ If this command doesn't result in any errors, you're good to go! ## Content not edited here -CLI reference documentation is maintained in upstream repositories. It's -partially generated from code, and is only vendored here for publishing. To -update the CLI reference docs, refer to the corresponding repository: +CLI reference documentation is maintained in upstream repositories and +generated from YAML data files in `data/cli/`. A Hugo content adapter +(`content/reference/cli/_content.gotmpl`) turns these data files into pages +automatically. To update the CLI reference docs, refer to the corresponding +repository: - [docker/cli](https://github.com/docker/cli) - [docker/buildx](https://github.com/docker/buildx) - [docker/compose](https://github.com/docker/compose) +- [docker/model-runner](https://github.com/docker/model-runner) +- [docker/mcp-gateway](https://github.com/docker/mcp-gateway) Feel free to raise an issue on this repository if you're not sure how to proceed, and we'll help out. 
diff --git a/Dockerfile b/Dockerfile index 0fbed160d30..a2c96589c4e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,12 +1,13 @@ # syntax=docker/dockerfile:1 # check=skip=InvalidBaseImagePlatform -ARG ALPINE_VERSION=3.21 -ARG GO_VERSION=1.24 +ARG ALPINE_VERSION=3.23 +ARG GO_VERSION=1.26 ARG HTMLTEST_VERSION=0.17.0 -ARG HUGO_VERSION=0.141.0 -ARG NODE_VERSION=22 -ARG PAGEFIND_VERSION=1.3.0 +ARG VALE_VERSION=3.11.2 +ARG HUGO_VERSION=0.161.1 +ARG NODE_VERSION=24 +ARG PAGEFIND_VERSION=1.5.2 # base defines the generic base stage FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base @@ -14,7 +15,8 @@ RUN apk add --no-cache \ git \ nodejs \ npm \ - gcompat + gcompat \ + rsync # npm downloads Node.js dependencies FROM base AS npm @@ -48,16 +50,23 @@ ARG HUGO_ENV="development" ARG DOCS_URL="https://docs.docker.com" ENV HUGO_CACHEDIR="/tmp/hugo_cache" RUN --mount=type=cache,target=/tmp/hugo_cache \ - hugo --gc --minify -e $HUGO_ENV -b $DOCS_URL + hugo \ + --gc \ + --minify \ + --panicOnWarning \ + --printPathWarnings \ + --printUnusedTemplates \ + -b $DOCS_URL \ + -e $HUGO_ENV +RUN ./hack/flatten-and-resolve.js public # lint lints markdown files -FROM davidanson/markdownlint-cli2:v0.14.0 AS lint -USER root +FROM ghcr.io/igorshubovych/markdownlint-cli:v0.45.0 AS lint RUN --mount=type=bind,target=. \ - /usr/local/bin/markdownlint-cli2 \ + markdownlint \ "content/**/*.md" \ - "#content/manuals/engine/release-notes/*.md" \ - "#content/manuals/desktop/previous-versions/*.md" + --ignore "content/manuals/engine/release-notes/*.md" \ + --ignore "content/manuals/desktop/previous-versions/*.md" # test validates HTML output and checks for broken links FROM wjdp/htmltest:v${HTMLTEST_VERSION} AS test @@ -66,6 +75,23 @@ COPY --from=build /project/public ./public ADD .htmltest.yml .htmltest.yml RUN htmltest +# vale +FROM jdkato/vale:v${VALE_VERSION} AS vale-run +WORKDIR /src +ARG GITHUB_ACTIONS +RUN --mount=type=bind,target=.,rw <<EOT + set -e + mkdir /out + args="" + [ "$GITHUB_ACTIONS" = "true" ] && args="--output=.vale-rdjsonl.tmpl" + set -x + vale sync + vale $args content/ | tee /out/vale.out +EOT + +FROM scratch AS vale +COPY --from=vale-run /out/vale.out / + # update-modules downloads and vendors Hugo modules FROM build-base AS update-modules # MODULE is the Go module path and version of the module to update @@ -74,8 +100,6 @@ RUN <<"EOT" set -ex if [ -n "$MODULE" ]; then hugo mod get ${MODULE} - RESOLVED=$(cat go.mod | grep -m 1 "${MODULE/@*/}" | awk '{print $1 "@" $2}') - go mod edit -replace "${MODULE/@*/}=${RESOLVED}"; else echo "no module set"; fi @@ -87,6 +111,22 @@ FROM scratch AS vendor COPY --from=update-modules /project/_vendor /_vendor COPY --from=update-modules /project/go.* / +FROM base AS validate-vendor +RUN --mount=target=/context \ + --mount=type=bind,from=vendor,target=/out \ + --mount=target=.,type=tmpfs <<EOT +set -e +rsync -a /context/. . +git add -A +rm -rf _vendor +cp -rf /out/* . +if [ -n "$(git status --porcelain -- go.mod go.sum _vendor)" ]; then + echo >&2 'ERROR: Vendor result differs. Please vendor your package with "make vendor"' + git status --porcelain -- go.mod go.sum _vendor + exit 1 +fi +EOT + # build-upstream builds an upstream project with a replacement module FROM build-base AS build-upstream # UPSTREAM_MODULE_NAME is the canonical upstream repository name and namespace (e.g. moby/buildkit) @@ -112,18 +152,6 @@ RUN apk add --no-cache fd ripgrep WORKDIR /test RUN --mount=type=bind,target=. 
./hack/test/unused_media -# path-warnings checks for duplicate target paths -FROM build-base AS path-warnings -RUN hugo --printPathWarnings > ./path-warnings.txt -RUN <<EOT -DUPLICATE_TARGETS=$(grep "Duplicate target paths" ./path-warnings.txt) -if [ ! -z "$DUPLICATE_TARGETS" ]; then - echo "$DUPLICATE_TARGETS" - echo "You probably have a duplicate alias defined. Please check your aliases." - exit 1 -fi -EOT - # pagefind installs the Pagefind runtime FROM base AS pagefind ARG PAGEFIND_VERSION diff --git a/README.md b/README.md index 39500fb3882..13f534ebbc6 100644 --- a/README.md +++ b/README.md @@ -1,52 +1,40 @@ # Docs @ Docker -<img src="static/assets/images/docker-docs.png" alt="Welcome to Docker Documentation" style="max-width: 50%;"> +<div align="center"> +<img src="static/assets/images/docker-docs.svg" alt="Welcome to Docker Documentation"> +</div> +<br/> +<br/> -Welcome to the Docker Documentation repository. This is the source for -[https://docs.docker.com/](https://docs.docker.com/). +Welcome to the Docker Documentation repository. This is the source for the [Docker Docs Website](https://docs.docker.com/). -Feel free to send us pull requests and file issues. Our docs are completely -open source, and we deeply appreciate contributions from the Docker community! +Feel free to open pull requests or issues. Our docs are completely open source, and we deeply appreciate contributions from the Docker community! ## Provide feedback -We’d love to hear your feedback. Please file documentation issues only in the -Docs GitHub repository. You can file a new issue to suggest improvements or if -you see any errors in the existing documentation. +We’d love to hear your feedback! To submit feedback: -Before submitting a new issue, check whether the issue has already been -reported. You can join the discussion using an emoji, or by adding a comment to -an existing issue. If possible, we recommend that you suggest a fix to the issue -by creating a pull request. +- Click **[New issue](https://github.com/docker/docs/issues/new)** on the docs repository, or +- Click **Request changes** in the right column of every page on + [docs.docker.com](https://docs.docker.com/), or +- Click **Give feedback** on every page in the docs. -You can ask general questions and get community support through the [Docker -Community Slack](https://dockr.ly/comm-slack). Personalized support is available +To get community support, use the [Docker Community Slack](https://dockr.ly/comm-slack). Personalized support is available through the Docker Pro, Team, and Business subscriptions. See [Docker -Pricing](https://www.docker.com/pricing) for details. +Pricing](https://www.docker.com/pricing?ref=Docs&refAction=DocsReadme) for details. If you have an idea for a new feature or behavior change in a specific aspect of -Docker or have found a product bug, file that issue in the project's code +Docker or have found a product bug, file an issue in the project's repository. -We've made it easy for you to file new issues. - -- Click **[New issue](https://github.com/docker/docs/issues/new)** on the docs repository and fill in the details, or -- Click **Request docs changes** in the right column of every page on - [docs.docker.com](https://docs.docker.com/) and add the details, or - - ![Request changes link](/static/assets/images/docs-site-feedback.png) - -- Click the **Give feedback** link on the side of every page in the docs. 
- - ![Docs feedback on each page](/static/assets/images/feedback-widget.png) - ## Contribute to Docker docs -We value your contribution. We want to make it as easy as possible to submit -your contributions to the Docker docs repository. Changes to the docs are -handled through pull requests against the `main` branch. To learn how to -contribute, see [CONTRIBUTING.md](CONTRIBUTING.md). +We welcome contributions! To get started: + +- [CONTRIBUTING.md](CONTRIBUTING.md) - Contribution workflow and setup +- [STYLE.md](STYLE.md) - Writing style and content guidelines +- [COMPONENTS.md](COMPONENTS.md) - Component and shortcode usage ## Copyright and license -Copyright 2013-2025 Docker, Inc., released under the <a href="https://github.com/docker/docs/blob/main/LICENSE">Apache 2.0 license</a> . +Copyright 2013-2026 Docker, Inc., released under the [Apache 2.0 license](https://github.com/docker/docs/blob/main/LICENSE). diff --git a/STYLE.md b/STYLE.md new file mode 100644 index 00000000000..e5c9a869140 --- /dev/null +++ b/STYLE.md @@ -0,0 +1,566 @@ +# Docker Documentation Style Guide + +This guide consolidates voice, grammar, formatting, and terminology +standards for Docker documentation. Follow these guidelines to create +clear, consistent, and helpful content. For instructions on how to use +components, shortcodes, and other features, see [COMPONENTS.md](COMPONENTS.md). + +## Voice and tone + +Write like a knowledgeable colleague explaining something useful. We're +developers writing for developers. + +### Core principles: The 4Cs + +1. **Correct** - Information is accurate +2. **Concise** - Succinct without unnecessary words +3. **Complete** - Includes enough detail to complete the task +4. **Clear** - Easy to understand + +### Writing approach + +- **Be honest.** Give all the facts. Don't use misdirection or + ambiguous statements. +- **Be concise.** Don't bulk up communication with fluffy words or + complex metaphors. Get to the point. +- **Be relaxed.** Casual but not lazy, smart but not arrogant, focused + but not cold. Be welcoming and warm. +- **Be inclusive.** Every person is part of our community, regardless + of experience level. + +### Tone guidelines + +- Use a natural, friendly, and respectful tone +- Use contractions to sound conversational (it's, you're, don't) +- Avoid overdoing politeness - skip "please" in most technical documentation +- Be clear over comical +- Use positive language - emphasize what users can do, not what they can't + +**Positive language example:** + +Instead of: "Features such as Single Sign-on (SSO), Image Access +Management, Registry Access Management are not available in Docker Team +subscription." + +Use: "Features such as Single Sign-on (SSO), Image Access Management, +Registry Access Management are available in Docker Business subscription." + +### Avoiding marketing language + +Documentation should be factual and direct, not promotional. + +**Avoid hedge words** that overstate ease or capability: + +- ❌ simply, just, easily, seamlessly (implies ease when it may not be + easy) +- ❌ robust, powerful (marketing language) +- ✅ Instead: describe what it actually does + +**Avoid excessive enthusiasm:** + +- ❌ "powerful feature", "game-changing", "revolutionary", "amazing" +- ✅ Instead: describe the feature and its benefits factually + +**Avoid meta-commentary:** + +These phrases add no value - state the fact directly: + +- ❌ "It's worth noting that..." +- ❌ "It's important to understand that..." +- ❌ "Keep in mind that..." 
+- ✅ Instead: state the information directly + +### Voice and perspective + +- **Use "you" not "we"**: Focus on what the user can do, not what "we" + created + - ❌ "We provide a feature that helps you deploy" + - ✅ "Deploy your applications with..." +- **Avoid "please"**: Don't use in normal explanations or "please note" + phrases +- **Write timelessly**: Avoid "currently" or "as of this writing" - the + documentation describes the product as it is today + +### Scope preservation + +When updating existing documentation, resist the urge to expand +unnecessarily. Users value brevity. + +**Understand the current document:** +Read it fully to grasp its scope, length, and character. Is it a minimal +how-to or a comprehensive reference? + +**Match the existing character:** +If the document is brief and direct (90 lines), keep it that way. Don't +transform a focused guide into an exhaustive tutorial. + +**Add only what's genuinely missing:** +Fill gaps, don't elaborate. If the document already covers a topic +adequately, don't expand it. + +**Value brevity:** +Say what needs to be said, then stop. Not every topic needs +prerequisites, troubleshooting, best practices, and examples sections. + +**Respect the original intent:** +The document exists in its current form for a reason. Improve it, don't +remake it. + +Good additions fill genuine gaps. Bad additions change the document's +character. When in doubt, add less rather than more. + +## Grammar and style + +Write in US English with US grammar. + +### Acronyms and initialisms + +- Spell out lesser-known acronyms on first use, then follow with the + acronym in parentheses: "single sign-on (SSO)" +- Don't spell out common acronyms like URL, HTML, API +- Use all caps for file type acronyms: JPEG, PNG +- Don't use apostrophes for plurals: URLs not URL's +- Avoid introducing acronyms in headings - introduce them in the body + text that follows + +### Capitalization + +Use sentence case for almost everything: headings, titles, links, buttons, +navigation labels. + +- Capitalize Docker solutions: Docker Desktop, Docker Hub, Docker Extensions +- Capitalize job titles only when they immediately precede a name: + "Chief Executive Officer Scott Johnston" but "Scott Johnston, chief + executive officer" +- Follow company capitalization preferences: DISH, bluecrux +- Match UI capitalization when referring to specific interface elements + +### Contractions + +Use contractions to maintain a conversational tone, but don't overdo it. + +- Stay consistent - don't switch between "you are" and "you're" in the + same content +- Avoid negative contractions when possible (can't, don't, won't) - + rewrite to be positive +- Never contract a noun with a verb (your container is ready, not your + container's ready) + +### Dangling modifiers + +Avoid unclear subjects: + +- ❌ After enabling auto-log-out, your users are logged out +- ✅ When you enable auto-log-out, your users are logged out + +### Dates and numbers + +- Use US date format: June 26, 2021 or Jun. 
26, 2021 +- Spell out numbers one through nine, except in units: 4 MB +- Use numerals for 10 and above: 10, 625, 1000 +- Use decimals instead of fractions: 0.5 not ½ +- For numbers with five or more digits, use spaces instead of commas: + 14 586 not 14,586 +- For version numbers: version 4.8.2, v1.0, Docker Hub API v2 + +### Punctuation + +- **Commas:** Use the Oxford comma (serial comma) in lists +- **Colons:** Use to introduce lists or provide explanations +- **Semicolons:** Don't use - write two sentences instead +- **Em dashes:** Use sparingly with spaces on either side: "text - like + this - text" +- **Hyphens:** Use for compound adjectives before nouns: + "up-to-date documentation" +- **Exclamation marks:** Avoid +- **Parentheses:** Avoid in technical documentation - they reduce + readability + +### Conciseness and redundant phrases + +Remove unnecessary words to make documentation clearer and more direct. + +**Eliminate redundant phrases:** + +- ❌ "in order to" → ✅ "to" +- ❌ "serves the purpose of" → ✅ state what it does directly +- ❌ "allows you to" → ✅ "lets you" or state what it does +- ❌ "enables you to" → ✅ "lets you" or state what it does + +### Bold and italics + +Use bold **only** for UI elements (buttons, menus, field labels). Never +use bold for emphasis, product names, or feature names. + +- ✅ Select **Save** (UI button - use bold) +- ✅ Docker Hub provides storage (product name - no bold) +- ✅ This is important to understand (emphasis - no bold) +- ❌ **Docker Desktop** is a tool (product name - don't bold) +- ❌ This is **very important** (emphasis - don't bold) +- ❌ The **build** command (command name - don't bold) + +**Italics:** +Use italics sparingly, as this formatting can be difficult to read in +digital experiences. Notable exceptions are titles of articles, blog +posts, or specification documents. + +## Formatting + +### Content types and detail balance + +Different content types require different writing approaches. Match your +style and detail level to the content type. + +**Getting Started / Tutorials:** + +- Step-by-step instructions +- Assume beginner knowledge +- Explain _why_ at each step +- Include more context and explanation + +**How-to Guides:** + +- Task-focused and goal-oriented +- Assume intermediate knowledge +- Focus on _how_ efficiently +- Less explanation, more direct steps + +**Reference Documentation:** + +- Comprehensive and exhaustive +- Assume advanced knowledge +- Focus on _what_ precisely +- Complete parameter lists and options + +**Concept Explanations:** + +- Educational and foundational +- Any skill level +- Focus on _understanding_ over doing +- Theory before practice + +**Match detail to context:** + +- **First mention** of a concept: explain it +- **Subsequent mentions**: link to the explanation or use the term + directly if recently explained +- **Common knowledge** (to your audience): state it, don't explain it +- **Edge cases**: mention them but don't let them dominate the main flow + +### Headings + +- Keep headings short (no more than eight words) +- Front-load the most important words +- Use descriptive, action-oriented language +- Skip leading articles (a, the) +- Avoid puns, teasers, and cultural references +- Page titles should be action-oriented: "Install Docker Desktop", + "Enable SCIM" + +### Page structure and flow + +Every page should answer two questions in the first paragraph: + +1. **What will I learn?** - State what the page covers +2. 
**Why does this matter?** - Explain the benefit or use case + +**Good opening:** + +```markdown +Docker Compose Watch automatically updates your running containers when +you change code. This eliminates the manual rebuild-restart cycle during +development. +``` + +**Weak opening:** + +```markdown +Docker Compose Watch is a powerful feature that enables developers to +streamline their development workflow by providing automatic +synchronization capabilities. +``` + +**Transitions and flow:** +Connect ideas naturally. Each section should flow logically from the +previous one. Save detailed discussions for after showing basic usage - +don't front-load complexity. + +**Good flow:** + +1. Prerequisites +2. Basic usage (show the simple case first) +3. Advanced options (add complexity after basics are clear) + +**Jarring flow:** + +1. Prerequisites +2. Overview of all capabilities (too much before seeing it work) +3. Basic usage + +**Avoid structural problems:** + +- Don't start multiple sentences or sections with the same structure + (varies pacing and improves readability) +- Don't over-explain obvious things (trust the reader's competence) +- Don't use "**Bold:** format" for subsection labels (use plain text + with colon) + +### Lists + +- Limit bulleted lists to five items when possible +- Don't add commas or semicolons to list item ends +- Capitalize list items for ease of scanning +- Make all list items parallel in structure +- Start each item with a capital letter unless it's a parameter or + command +- For nested sequential lists, use lowercase letters: 1. Top level, + a. Child step + +**Avoid marketing-style list formatting:** + +Don't use "**Term** - Description" format, which reads like marketing +copy: + +❌ **Bad example:** + +- **Build** - Creates images from Dockerfiles +- **Run** - Starts containers from images +- **Push** - Uploads images to registries + +✅ **Good alternatives:** + +Use simple descriptive bullets: + +- Build images from Dockerfiles +- Start containers from images +- Upload images to registries + +Or use proper prose when appropriate: +"Docker lets you build images from Dockerfiles, start containers from +images, and upload images to registries." + +### Code and commands + +Format Docker commands, instructions, filenames, and package names as +inline code using backticks: `docker build` + +**Code example pattern:** + +Follow this three-step pattern for code examples: + +1. **State** what you're doing (brief) +2. **Show** the command or code +3. **Explain** the result or key parts (if not obvious) + +**Example:** + +````markdown +Build your image: + +```console +$ docker build -t my-app . +``` + +This creates an image tagged `my-app` using the Dockerfile in the +current directory. +```` + +**When to show command output:** + +- Show output when it helps understanding +- Show output when users need to verify results +- Don't show output for commands with obvious results +- Don't show output when it's not relevant to the point + +For code block syntax, language hints, variables, and advanced features +(line highlighting, collapsible blocks), see +[COMPONENTS.md](COMPONENTS.md#code-blocks). 
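To illustrate the "show output" case from the list above, here is a
sketch where the output lets the reader verify the result. The image
name and output values are illustrative:

```console
$ docker image ls my-app
REPOSITORY   TAG       IMAGE ID       CREATED          SIZE
my-app       latest    a1b2c3d4e5f6   10 seconds ago   125MB
```

The output confirms that the tagged image exists, which is the kind of
verification that justifies including it.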
+ +### Links + +- Use plain, descriptive language (around five words) +- Front-load important terms at the beginning +- Avoid generic calls to action like "click here" or "find out more" +- Don't include end punctuation in link text +- Use relative links with source filenames for internal links +- Don't make link text bold or italic unless it would be in normal + body copy + +### Images + +**Best practices:** + +- Capture only relevant UI - don't include unnecessary white space +- Use realistic text, not lorem ipsum +- Keep images small and focused +- Compress images before adding to repository +- Remove unused images from repository + +For image syntax and parameters (sizing, borders), see +[COMPONENTS.md](COMPONENTS.md#images). + +### Callouts + +Use callouts sparingly to emphasize important information. Use them only +when information truly deserves special emphasis - for warnings, critical +notes, or important context that affects how users approach a task. + +Callout types: Note (informational), Tip (helpful suggestion), Important +(moderate issue), Warning (potential damage/loss), Caution (serious risk). + +For syntax and detailed usage guidelines, see +[COMPONENTS.md](COMPONENTS.md#callouts). + +### Tables + +- Use sentence case for table headings +- Don't leave cells empty - use "N/A" or "None" if needed +- Align decimals on the decimal point + +### Navigation instructions + +- Use location, then action: "From the **Visibility** drop-down list, select Public" +- Be brief and specific: "Select **Save**" +- Start with the reason if a step needs one: "To view changes, select the link" + +### Optional steps + +Start optional steps with "Optional." followed by the instruction: + +_1. Optional. Enter a description for the job._ + +### File types and units + +- Use formal file type names: "a PNG file" not "a .png file" +- Use abbreviated units with spaces: "10 GB" not "10GB" or + "10 gigabytes" + +## Word list + +### Recommended terms + +| Use | Don't use | +| ------------------------------------- | ---------------------- | +| lets | allows | +| turn on, toggle on | enable | +| turn off, toggle off | disable | +| select | click | +| following | below | +| previous | above | +| checkbox | check box | +| username | account name | +| sign in | sign on, log in, login | +| sign up, create account | register | +| for example, such as | e.g. | +| run | execute | +| want | wish | +| Kubernetes | K8s | +| repository | repo | +| administrator (first use), admin (UI) | - | +| and | & (ampersand) | +| move through, navigate to | scroll | +| (be more precise) | respectively | +| versus | vs, vs. | +| use | utilize | +| help | facilitate | + +### Version numbers + +- Use "earlier" not "lower": Docker Desktop 4.1 and earlier +- Use "later" not "higher" or "above": Docker Desktop 4.1 and later + +### Quick transformations + +Common phrases to transform for clearer, more direct writing: + +| Instead of... | Write... 
| +| ------------------------ | ---------------------------- | +| In order to build | To build | +| This allows you to build | This lets you build / Build | +| Simply run the command | Run the command | +| We provide a feature | Docker provides / You can | +| Utilize the API | Use the API | +| This facilitates testing | This helps test / This tests | +| Click the button | Select the button | +| It's worth noting that X | X (state it directly) | + +### UI elements + +- **tab vs view** - Use "view" for major UI sections, "tab" for + sub-sections +- **toggle** - You "turn on" or "turn off" a toggle + +## Docker terminology + +### Products and features + +- **Docker Compose** - The application or all functionality associated + with it +- **`docker compose`** - Use code formatting for commands +- **Docker Compose CLI** - The family of Compose commands from + Docker CLI +- **Compose plugin** - The add-on for Docker CLI that can be + enabled/disabled +- **compose.yaml** - Current designation for the Compose file (use + code formatting) + +### Technical terms + +- **Digest** - Long string automatically created when you push an + image +- **Member** - A user of Docker Hub who is part of an organization +- **Namespace** - Organization or user name; every image needs a + namespace +- **Node** - Physical or virtual machine running Docker Engine in + swarm mode + - Manager nodes: Perform swarm management and orchestration + - Worker nodes: Execute tasks +- **Registry** - Online storage for Docker images +- **Repository** - Lets users share container images with team, + customers, or community + +### Platform terminology + +- **Multi-platform** - Broad: Mac/Linux/Windows; Narrow: Linux/amd64 + and Linux/arm64 +- **Multi-architecture (multi-arch)** - CPU architecture or + hardware-architecture-based; don't use as synonym for multi-platform + +## Quick reference + +### Voice checklist + +- ✅ Direct and practical +- ✅ Conversational with active voice +- ✅ Specific and actionable +- ❌ Corporate-speak +- ❌ Condescending +- ❌ Overly formal + +### Structure checklist + +- ✅ Answer "What will I learn?" in the first paragraph +- ✅ Answer "Why does this matter?" in the first paragraph +- ✅ Front-load important information in headings and lists +- ✅ Connect ideas naturally between sections +- ✅ Use examples to clarify concepts +- ✅ Match content type (tutorial vs how-to vs reference vs concept) +- ✅ Follow code example pattern: state → show → explain +- ✅ Preserve document scope (don't unnecessarily expand) + +### Common mistakes + +- ❌ Using "click" instead of "select" +- ❌ Using "below" instead of "following" +- ❌ Starting sentences with numbers (recast the sentence) +- ❌ Using apostrophes in plural acronyms +- ❌ Leaving table cells empty +- ❌ Using commas in large numbers (use spaces) +- ❌ Referring to file types by extension (.png file) +- ❌ Using bold for product names or emphasis (only for UI elements) +- ❌ Using "**Term** - Description" format in lists +- ❌ Using hedge words (simply, easily, just, seamlessly) +- ❌ Using meta-commentary (it's worth noting that...) +- ❌ Using "we" instead of "you" or "Docker" +- ❌ Showing command output when it's not needed +- ❌ Not explaining what users will learn in first paragraph diff --git a/_vale/.vale-config/0-Hugo.ini b/_vale/.vale-config/0-Hugo.ini deleted file mode 100644 index 4347ca9e902..00000000000 --- a/_vale/.vale-config/0-Hugo.ini +++ /dev/null @@ -1,10 +0,0 @@ -[*.md] -# Exclude `{{< ... >}}`, `{{% ... %}}`, [Who]({{< ... 
>}}) -TokenIgnores = ({{[%<] .* [%>]}}.*?{{[%<] ?/.* [%>]}}), \ -(\[.+\]\({{< .+ >}}\)), \ -[^\S\r\n]({{[%<] \w+ .+ [%>]}})\s, \ -[^\S\r\n]({{[%<](?:/\*) .* (?:\*/)[%>]}})\s - -# Exclude `{{< myshortcode `This is some <b>HTML</b>, ... >}}` -BlockIgnores = (?sm)^({{[%<] \w+ [^{]*?\s[%>]}})\n$, \ -(?s) *({{< highlight [^>]* ?>}}.*?{{< ?/ ?highlight >}}) diff --git a/_vale/Docker/Acronyms.yml b/_vale/Docker/Acronyms.yml deleted file mode 100644 index 476d8937d5b..00000000000 --- a/_vale/Docker/Acronyms.yml +++ /dev/null @@ -1,164 +0,0 @@ -extends: conditional -message: "'%s' has no definition." -link: https://docs.docker.com/contribute/style/grammar/#acronyms-and-initialisms -level: warning -ignorecase: false -# Ensures that the existence of 'first' implies the existence of 'second'. -first: '\b([A-Z]{2,5})\b' -second: '(?:\b[A-Z][a-z]+ )+\(([A-Z]{2,5})s?\)' -# ... with the exception of these: -exceptions: - - ACH - - AGPL - - AI - - API - - ARM - - ARP - - ASP - - AUFS - - AWS - - BIOS - - BPF - - BSD - - CFS - - CI - - CIDR - - CISA - - CLI - - CNCF - - CORS - - CPU - - CSS - - CSV - - CUDA - - CVE - - DAD - - DCT - - DEBUG - - DHCP - - DNS - - DOM - - DPI - - DSOS - - DVP - - ECI - - ELK - - FAQ - - FPM - - FUSE - - GB - - GCC - - GDB - - GET - - GHSA - - GNOME - - GNU - - GPG - - GPL - - GPU - - GRUB - - GTK - - GUI - - GUID - - HEAD - - HTML - - HTTP - - HTTPS - - IAM - - IBM - - ID - - IDE - - IP - - IPAM - - IPC - - IT - - JAR - - JIT - - JSON - - JSX - - KDE - - LESS - - LLDB - - LLM - - LTS - - MAC - - MATE - - MCP - - mcp - - MDM - - MDN - - MSI - - NAT - - NET - - NFS - - NOTE - - NTFS - - NTLM - - NUMA - - NVDA - - OCI - - OS - - OSI - - OSS - - PATH - - PDF - - PEM - - PID - - PHP - - POSIX - - POST - - QA - - QEMU - - RAM - - REPL - - REST - - RFC - - RHEL - - RPM - - RSA - - SAML - - SARIF - - SBOM - - SCIM - - SCM - - SCSS - - SCTP - - SDK - - SLES - - SLSA - - SOCKS - - SPDX - - SQL - - SSD - - SSH - - SSL - - SSO - - SVG - - TBD - - TCP - - TCP - - TIP - - TLS - - TODO - - TTY - - TXT - - UDP - - URI - - URL - - USB - - USD - - UTF - - UTS - - UUID - - VAT - - VDI - - VIP - - VLAN - - VM - - VPN - - WSL - - XML - - XSS - - YAML - - ZFS - - ZIP diff --git a/_vale/Docker/Forbidden.yml b/_vale/Docker/Forbidden.yml new file mode 100644 index 00000000000..d8b7a37ae8c --- /dev/null +++ b/_vale/Docker/Forbidden.yml @@ -0,0 +1,6 @@ +extends: substitution +message: "Use '%s' instead of '%s'." +level: error +ignorecase: false +swap: + Docker CE: Docker Engine diff --git a/_vale/Docker/HeadingLength.yml b/_vale/Docker/HeadingLength.yml deleted file mode 100644 index 270ccf80aed..00000000000 --- a/_vale/Docker/HeadingLength.yml +++ /dev/null @@ -1,7 +0,0 @@ -extends: occurrence -message: "Try to keep headings short (< 8 words)." -link: https://docs.docker.com/contribute/style/formatting/#headings-and-subheadings -scope: heading -level: suggestion -max: 8 -token: \b(\w+)\b diff --git a/_vale/Docker/HeadingSentenceCase.yml b/_vale/Docker/HeadingSentenceCase.yml deleted file mode 100644 index b5edebee1b2..00000000000 --- a/_vale/Docker/HeadingSentenceCase.yml +++ /dev/null @@ -1,8 +0,0 @@ -extends: capitalization -message: "Use sentence case for headings: '%s'." 
-level: warning -scope: heading -match: $sentence -threshold: 0.4 -indicators: - - ":" diff --git a/_vale/Docker/RecommendedWords.yml b/_vale/Docker/RecommendedWords.yml index 2721e0881fb..8c5e526280a 100644 --- a/_vale/Docker/RecommendedWords.yml +++ b/_vale/Docker/RecommendedWords.yml @@ -14,10 +14,8 @@ swap: (?:sign on|log on|log in|logon|login): sign in above: previous adaptor: adapter - admin(?! console): administrator administrate: administer afterwards: afterward - allow: let allows: lets alphabetic: alphabetical alphanumerical: alphanumeric diff --git a/_vale/Docker/SentenceLength.yml b/_vale/Docker/SentenceLength.yml deleted file mode 100644 index 41bcdd12603..00000000000 --- a/_vale/Docker/SentenceLength.yml +++ /dev/null @@ -1,7 +0,0 @@ -extends: occurrence -message: "Write short, concise sentences. (<=40 words)" -scope: sentence -link: https://docs.docker.com/contribute/checklist/ -level: warning -max: 40 -token: \b(\w+)\b diff --git a/_vale/Docker/Units.yml b/_vale/Docker/Units.yml index cc9134fe545..7bb9b737f6d 100644 --- a/_vale/Docker/Units.yml +++ b/_vale/Docker/Units.yml @@ -7,4 +7,4 @@ swap: gigabytes?: GB megabytes?: MB petabytes?: PB - terrabytes?: TB + terabytes?: TB diff --git a/_vale/config/vocabularies/Docker/accept.txt b/_vale/config/vocabularies/Docker/accept.txt index f2621ae3839..bc39fed8844 100644 --- a/_vale/config/vocabularies/Docker/accept.txt +++ b/_vale/config/vocabularies/Docker/accept.txt @@ -1,123 +1,296 @@ (?i)[A-Z]{2,}'?s +jq +zstd +Zstandard +untracked +ripgrep +exfiltration +sandboxing +Adreno +Aleksandrov +Awaitility Amazon Anchore Apple Artifactory +armhf +[Aa]uditability +auditable +autolock +[Aa]llowlist(ing)? Azure +Azure AD +[Bb]ackport +bootup +Bitnami Btrfs +Bugsnag BuildKit +buildkitd BusyBox +cagent +CD CentOS Ceph +cgroup Chrome Chrome DevTools +CI +CI/CD Citrix +classpath +cli +CLI CloudFront Codefresh Codespaces -CouchDB +config +containerd Couchbase +CouchDB +Crowdstrike +[Cc]ybersecurity +datacenter +datasource Datadog Ddosify Debootstrap +denylist +deprovisioning +deserialization +deserialize[d]? Dev -Dex Dev Environments? +Dex +displayName Django +DMR Docker Build Cloud Docker Business -Docker Dasboard +Docker Dashboard Docker Desktop Docker Engine Docker Extension Docker Hub Docker Scout Docker Team -Docker's Docker-Sponsored Open Source +Docker's Dockerfile +dockerignore Dockerize +Dockerized Dockerizing +Duckduckgo Entra +EPERM +ESXi Ethernet +[Ee]xploitability +[Ff]ailover Fargate Fedora +firewalld Flink +fluentd +g?libc GeoNetwork +GGUF Git -GitHub( Actions)? +Gitea +GitHub +GitHub Actions Google +Gradle Grafana Gravatar +gRPC +Groq +Grype +Gunicorn HyperKit -IPv[46] -IPvlan +inferencing +initializer +Initializr +inotify Intel Intune -JFrog +IPsec +iptables +IPv[46] +IPvlan +isort Jamf +JavaScript JetBrains +JFrog JUnit +Kata +Keycloak Kerberos +Kiro Kitematic Kubeadm +kubectl +kubefwd +kubelet Kubernetes -Laravel Laradock +Laravel +libseccomp Linux +Liquibase LinuxKit +logback +Loggly Logstash +lookup Mac +macOS +macvlan Mail(chimp|gun) +mfsymlinks +Micronaut Microsoft +minikube +misconfiguration +[Mm]ixins? +monorepos? +musl MySQL -NFSv\d +nameserver +namespaced? 
+namespacing +Neovim +Npgsql +netfilter +netlabel +netlink Netplan +NFSv\d Nginx +npm Nutanix Nuxeo +NVIDIA OAuth +Okta Ollama +osquery +osxfs OTel -Okta -PKG Paketo +PAT +perl +pgAdmin +pgx +PKG +plaintext +plist +pluggable Postgres +psycopg +pytest PowerShell Python +Qualcomm +Quickview +rebalance +reimplement +reloader +Rego +Rekor +ROCm +rollback +rootful +rqt +runc +RViz Ryuk S3 -SQLite +scrollable +SELinux Slack +snapshotters? +Sigstore Snyk Solr SonarQube +Splunk +SQLite +stargz +stdin +stdout +subfolder +sudo +subvolume Syft +syntaxes Sysbox +sysctl +sysctls Sysdig +systemd +teleop Testcontainers +tmpfs Traefik +Trivy Trixie +Turtlesim +typesafe Ubuntu +ufw +uv +umask +uncaptured +Uncaptured +unconfigured +undeterminable Unix +unmarshalling +unmanaged +upsert +Visual Studio Code VMware +vpnkit +vSphere +Vulkan +Vue Wasm +Wasmtime Windows +windowsfilter WireMock +workdir +WORKDIR +[Ww]orktrees? +[Pp]assthrough +[Pp]reconfigured Xdebug +xUnit +XQuartz +youki +Yubikey Zscaler Zsh [Aa]nonymized? [Aa]utobuild [Aa]llowlist +[Aa]gentic [Aa]utobuilds? [Aa]utotests? +[Aa]utoscaling [Bb]uildx [Bb]uildpack(s)? [Bb]uildx [Cc]odenames? [Cc]ompose [Cc]onfigs +[dD]eduplicate +[Dd]enylist +[Dd]ev +[Dd]iscoverability [Dd]istroless [Ff]ilepaths? [Ff]iletypes? @@ -138,6 +311,8 @@ Zsh [Pp]rocfs [Pp]roxied [Pp]roxying +[pP]yright +[Rr]eadme [Rr]eal-time [Rr]egex(es)? [Rr]untimes? @@ -153,6 +328,9 @@ Zsh [Ss]warm [Ss]yscalls? [Ss]ysfs +[Tt]eardown +[Tt]echnographic +[Tt]odo [Tt]oolchains? [Uu]narchived? [Uu]ngated @@ -162,54 +340,9 @@ Zsh [Vv]irtiofs [Vv]irtualize [Ww]alkthrough -bootup -cgroup -config -containerd -datacenter -deprovisioning -deserialization -deserialize -displayName -dockerignore -firewalld -g?libc -gRPC -inotify -iptables -kubectl -kubefwd -kubelet -lookup -macOS -macvlan -mfsymlinks -minikube -monorepos? -musl -nameserver -namespace -namespacing -netfilter -netlabel -npm -osquery -osxfs -pgAdmin -rollback -rootful -runc -snapshotters? -stdin -stdout -syntaxes -sysctls -systemd -tmpfs -ufw -uid -umask -unmanaged -vSphere -vpnkit -windowsfilter +[Tt]oolsets? +[Rr]eachability +[Rr]erank(ing|ed)? +[Ee]vals? +[Ll]abspaces? +[Uu]nsloth diff --git a/_vendor/github.com/docker/buildx/docs/bake-reference.md b/_vendor/github.com/docker/buildx/docs/bake-reference.md index d658d891edd..a4f236fa529 100644 --- a/_vendor/github.com/docker/buildx/docs/bake-reference.md +++ b/_vendor/github.com/docker/buildx/docs/bake-reference.md @@ -227,6 +227,8 @@ The following table shows the complete list of attributes that you can assign to | [`description`](#targetdescription) | String | Description of a target | | [`dockerfile-inline`](#targetdockerfile-inline) | String | Inline Dockerfile string | | [`dockerfile`](#targetdockerfile) | String | Dockerfile location | +| [`entitlements`](#targetentitlements) | List | Permissions that the build process requires to run | +| [`extra-hosts`](#targetextra-hosts) | List | Customs host-to-IP mapping | | [`inherits`](#targetinherits) | List | Inherit attributes from other targets | | [`labels`](#targetlabels) | Map | Metadata for images | | [`matrix`](#targetmatrix) | Map | Define a set of variables that forks a target into multiple targets. 
| @@ -234,6 +236,7 @@ The following table shows the complete list of attributes that you can assign to | [`no-cache-filter`](#targetno-cache-filter) | List | Disable build cache for specific stages | | [`no-cache`](#targetno-cache) | Boolean | Disable build cache completely | | [`output`](#targetoutput) | List | Output destinations | +| [`policy`](#targetpolicy) | List | Policies to validate build sources and metadata | | [`platforms`](#targetplatforms) | List | Target platforms | | [`pull`](#targetpull) | Boolean | Always pull images | | [`secret`](#targetsecret) | List | Secrets to expose to the build | @@ -297,7 +300,12 @@ example adds annotations to both the image index and manifests. ```hcl target "default" { - output = [{ type = "image", name = "foo" }] + output = [ + { + type = "image" + name = "foo" + } + ] annotations = ["index,manifest:org.opencontainers.image.authors=dvdksn"] } ``` @@ -314,11 +322,11 @@ This attribute accepts the long-form CSV version of attestation parameters. target "default" { attest = [ { - type = "provenance", - mode = "max", + type = "provenance" + mode = "max" }, { - type = "sbom", + type = "sbom" } ] } @@ -336,12 +344,12 @@ This takes a list value, so you can specify multiple cache sources. target "app" { cache-from = [ { - type = "s3", - region = "eu-west-1", + type = "s3" + region = "eu-west-1" bucket = "mybucket" }, { - type = "registry", + type = "registry" ref = "user/repo:cache" } ] @@ -360,12 +368,12 @@ This takes a list value, so you can specify multiple cache export targets. target "app" { cache-to = [ { - type = "s3", - region = "eu-west-1", + type = "s3" + region = "eu-west-1" bucket = "mybucket" }, { - type = "inline", + type = "inline" } ] } @@ -445,9 +453,9 @@ a context based on the pattern of the context value. ```hcl # docker-bake.hcl target "app" { - contexts = { - alpine = "docker-image://alpine:3.13" - } + contexts = { + alpine = "docker-image://alpine:3.13" + } } ``` @@ -462,9 +470,9 @@ RUN echo "Hello world" ```hcl # docker-bake.hcl target "app" { - contexts = { - src = "../path/to/source" - } + contexts = { + src = "../path/to/source" + } } ``` @@ -485,12 +493,13 @@ COPY --from=src . . ```hcl # docker-bake.hcl target "base" { - dockerfile = "baseapp.Dockerfile" + dockerfile = "baseapp.Dockerfile" } + target "app" { - contexts = { - baseapp = "target:base" - } + contexts = { + baseapp = "target:base" + } } ``` @@ -507,11 +516,11 @@ functionality. ```hcl target "lint" { - description = "Runs golangci-lint to detect style errors" - args = { - GOLANGCI_LINT_VERSION = null - } - dockerfile = "lint.Dockerfile" + description = "Runs golangci-lint to detect style errors" + args = { + GOLANGCI_LINT_VERSION = null + } + dockerfile = "lint.Dockerfile" } ``` @@ -577,6 +586,20 @@ target "integration-tests" { Entitlements are enabled with a two-step process. First, a target must declare the entitlements it requires. Secondly, when invoking the `bake` command, the user must grant the entitlements by passing the `--allow` flag or confirming the entitlements when prompted in an interactive terminal. This is to ensure that the user is aware of the possibly insecure permissions they are granting to the build process. +### `target.extra-hosts` + +Use the `extra-hosts` attribute to define customs host-to-IP mapping for the +target. This has the same effect as passing a [`--add-host`][add-host] flag to +the build command. 
+ +```hcl +target "default" { + extra-hosts = { + my_hostname = "8.8.8.8" + } +} +``` + ### `target.inherits` A target can inherit attributes from other targets. @@ -861,7 +884,7 @@ This is the same as the `--no-cache` flag for `docker build`. ```hcl target "default" { - no-cache = 1 + no-cache = true } ``` @@ -877,6 +900,21 @@ target "default" { } ``` +### `target.policy` + +Policies to validate build sources and metadata. Each entry uses the same keys +as the `--policy` flag for `docker buildx build` (`filename`, `reset`, +`disabled`, `strict`, `log-level`). Bake also automatically loads +`Dockerfile.rego` alongside the target Dockerfile when present. + +```hcl +target "default" { + policy = [ + { filename = "extra.rego" }, + ] +} +``` + ### `target.platforms` Set target platforms for the build target. @@ -913,8 +951,15 @@ variable "HOME" { target "default" { secret = [ - { type = "env", id = "KUBECONFIG" }, - { type = "file", id = "aws", src = "${HOME}/.aws/credentials" }, + { + type = "env" + id = "KUBECONFIG" + }, + { + type = "file" + id = "aws" + src = "${HOME}/.aws/credentials" + } ] } ``` @@ -1068,7 +1113,9 @@ or interpolate them in attribute values in your Bake file. ```hcl variable "TAG" { + type = string default = "latest" + description = "Tag to use for build" } target "webapp-dev" { @@ -1081,6 +1128,8 @@ You can assign a default value for a variable in the Bake file, or assign a `null` value to it. If you assign a `null` value, Buildx uses the default value from the Dockerfile instead. +You can also add a description of the variable's purpose with the `description` field. This attribute is useful when combined with the `docker buildx bake --list=variables` option, providing a more informative output when listing the available variables in a Bake file. + You can override variable defaults set in the Bake file using environment variables. The following example sets the `TAG` variable to `dev`, overriding the default `latest` value shown in the previous example. @@ -1089,6 +1138,206 @@ overriding the default `latest` value shown in the previous example. $ TAG=dev docker buildx bake webapp-dev ``` +Variables can also be assigned an explicit type. +If provided, it will be used to validate the default value (if set), as well as any overrides. +This is particularly useful when using complex types which are intended to be overridden. +The previous example could be expanded to apply an arbitrary series of tags. +```hcl +variable "TAGS" { + default = ["latest"] + type = list(string) +} + +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = [for tag in TAGS: "docker.io/username/webapp:${tag}"] +} +``` + +This example shows how to generate three tags without changing the file +or using custom functions/parsing: +```console +$ TAGS=dev,latest,2 docker buildx bake webapp-dev +``` + +### Variable typing + +The following primitive types are available: +* `string` +* `number` +* `bool` + +The type is expressed like a keyword; it must be expressed as a literal: +```hcl +variable "OK" { + type = string +} + +# cannot be an actual string +variable "BAD" { + type = "string" +} + +# cannot be the result of an expression +variable "ALSO_BAD" { + type = lower("string") +} +``` +Specifying primitive types can be valuable to show intent (especially when a default is not provided), +but bake will generally behave as expected without explicit typing. 
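+
+For illustration, assume a variable `REPLICAS` declared with
+`type = number` and a default of `1`. Overrides are then checked against
+the declared type (the exact error output may differ):
+
+```console
+# Parsed and validated as a number
+$ REPLICAS=3 docker buildx bake
+
+# Expected to fail type validation, since "three" is not a number
+$ REPLICAS=three docker buildx bake
+```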
+ +Complex types are expressed with "type constructors"; they are: +* `tuple([<type>,...])` +* `list(<type>)` +* `set(<type>)` +* `map(<type>)` +* `object({<attr>=<type>},...})` + +The following are examples of each of those, as well as how the (optional) default value would be expressed: +```hcl +# structured way to express "1.2.3-alpha" +variable "MY_VERSION" { + type = tuple([number, number, number, string]) + default = [1, 2, 3, "alpha"] +} + +# JDK versions used in a matrix build +variable "JDK_VERSIONS" { + type = list(number) + default = [11, 17, 21] +} + +# better way to express the previous example; this will also +# enforce set semantics and allow use of set-based functions +variable "JDK_VERSIONS" { + type = set(number) + default = [11, 17, 21] +} + +# with the help of lookup(), translate a 'feature' to a tag +variable "FEATURE_TO_NAME" { + type = map(string) + default = {featureA = "slim", featureB = "tiny"} +} + +# map a branch name to a registry location +variable "PUSH_DESTINATION" { + type = object({branch = string, registry = string}) + default = {branch = "main", registry = "prod-registry.invalid.com"} +} + +# make the previous example more useful with composition +variable "PUSH_DESTINATIONS" { + type = list(object({branch = string, registry = string})) + default = [ + {branch = "develop", registry = "test-registry.invalid.com"}, + {branch = "main", registry = "prod-registry.invalid.com"}, + ] +} +``` +Note that in each example, the default value would be valid even if typing was not present. +If typing was omitted, the first three would all be considered `tuple`; +you would be restricted to functions that operate on `tuple` and, for example, not be able to add elements. +Similarly, the third and fourth would both be considered `object`, with the limits and semantics of that type. +In short, in the absence of a type, any value delimited with `[]` is a `tuple` +and value delimited with `{}` is an `object`. +Explicit typing for complex types not only opens up the ability to use functions applicable to that specialized type, +but is also a precondition for providing overrides. + +> [!NOTE] +> See [HCL Type Expressions][typeexpr] page for more details. + +### Overriding variables + +As mentioned in the [intro to variables](#variable), primitive types (`string`, `number`, and `bool`) +can be overridden without typing and will generally behave as expected. +(When explicit typing is not provided, a variable is assumed to be primitive when the default value lacks `{}` or `[]` delimiters; +a variable with neither typing nor a default value is treated as `string`.) +Naturally, these same overrides can be used alongside explicit typing too; +they may help in edge cases where you want `VAR=true` to be a `string`, where without typing, +it may be a `string` or a `bool` depending on how/where it's used. +Overriding a variable with a complex type can only be done when the type is provided. +This is still done via environment variables, but the values can be provided via CSV or JSON. + +#### CSV overrides + +This is considered the canonical method and is well suited to interactive usage. +It is assumed that `list` and `set` will be the most common complex type, +as well as the most common complex type designed to be overridden. +Thus, there is full CSV support for `list` and `set` +(and `tuple`; despite being considered a structural type, it is more like a collection type in this regard). 
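+
+For example, reusing the `JDK_VERSIONS` variable declared earlier with
+`type = set(number)`, a CSV override is parsed against the declared
+element type:
+
+```console
+$ JDK_VERSIONS=17,21 docker buildx bake
+```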
+ + +There is limited support for `map` and `object` and no support for composite types; +for these advanced cases, an alternative mechanism [using JSON](#json-overrides) is available. + +#### JSON overrides + +Overrides can also be provided via JSON. +This is the only method available for providing some complex types and may be convenient if overrides are already JSON +(for example, if they come from a JSON API). +It can also be used when dealing with values are difficult or impossible to specify using CSV (e.g., values containing quotes or commas). +To use JSON, simply append `_JSON` to the variable name. +In this contrived example, CSV cannot handle the second value; despite being a supported CSV type, JSON must be used: +```hcl +variable "VALS" { + type = list(string) + default = ["some", "list"] +} +``` +```console +$ cat data.json +["hello","with,comma","with\"quote"] +$ VALS_JSON=$(< data.json) docker buildx bake + +# CSV equivalent, though the second value cannot be expressed at all +$ VALS='hello,"with""quote"' docker buildx bake +``` + +This example illustrates some precedence and usage rules: +```hcl +variable "FOO" { + type = string + default = "foo" +} + +variable "FOO_JSON" { + type = string + default = "foo" +} +``` + +The variable `FOO` can *only* be overridden using CSV because `FOO_JSON`, which would typically used for a JSON override, +is already a defined variable. +Since `FOO_JSON` is an actual variable, setting that environment variable would be expected to a CSV value. +A JSON override *is* possible for this variable, using environment variable `FOO_JSON_JSON`. + +```Console +# These three are all equivalent, setting variable FOO=bar +$ FOO=bar docker buildx bake <...> +$ FOO='bar' docker buildx bake <...> +$ FOO="bar" docker buildx bake <...> + +# Sets *only* variable FOO_JSON; FOO is untouched +$ FOO_JSON=bar docker buildx bake <...> + +# This also sets FOO_JSON, but will fail due to not being valid JSON +$ FOO_JSON_JSON=bar docker buildx bake <...> + +# These are all equivalent +$ cat data.json +"bar" +$ FOO_JSON_JSON=$(< data.json) docker buildx bake <...> +$ FOO_JSON_JSON='"bar"' docker buildx bake <...> +$ FOO_JSON=bar docker buildx bake <...> + +# This results in setting two different variables, both specified as CSV (FOO=bar and FOO_JSON="baz") +$ FOO=bar FOO_JSON='"baz"' docker buildx bake <...> + +# These refer to the same variable with FOO_JSON_JSON having precedence and read as JSON (FOO_JSON=baz) +$ FOO_JSON=bar FOO_JSON_JSON='"baz"' docker buildx bake <...> +``` + ### Built-in variables The following variables are built-ins that you can use with Bake without having @@ -1101,11 +1350,17 @@ to define them. ### Use environment variable as default -You can set a Bake variable to use the value of an environment variable as a default value: +If an environment variable exists with the same name as a declared Bake +variable, Bake uses that environment variable value instead of the declared +default. + +To disable this environment-based variable lookup, set +`BUILDX_BAKE_DISABLE_VARS_ENV_LOOKUP=1`. 
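+
+As a sketch of both behaviors, using the `HOME` declaration shown below
+(which value is used depends on your environment):
+
+```console
+# HOME is set in the shell, so Bake uses that value instead of the declared default
+$ docker buildx bake
+
+# Disable the environment lookup; the declared default "/root" is used
+$ BUILDX_BAKE_DISABLE_VARS_ENV_LOOKUP=1 docker buildx bake
+```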
+ ```hcl variable "HOME" { - default = "$HOME" + default = "/root" } ``` @@ -1169,8 +1424,7 @@ $ docker buildx bake ## Function -A [set of general-purpose functions][bake_stdlib] -provided by [go-cty][go-cty] +A [set of general-purpose functions][bake_stdlib] provided by [go-cty][go-cty] are available for use in HCL files: ```hcl @@ -1208,8 +1462,9 @@ target "webapp-dev" { <!-- external links --> +[add-host]: https://docs.docker.com/reference/cli/docker/buildx/build/#add-host [attestations]: https://docs.docker.com/build/attestations/ -[bake_stdlib]: https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go +[bake_stdlib]: https://github.com/docker/buildx/blob/master/docs/bake-stdlib.md [build-arg]: https://docs.docker.com/reference/cli/docker/image/build/#build-arg [build-context]: https://docs.docker.com/reference/cli/docker/buildx/build/#build-context [cache-backends]: https://docs.docker.com/build/cache/backends/ @@ -1226,4 +1481,5 @@ target "webapp-dev" { [ssh]: https://docs.docker.com/reference/cli/docker/buildx/build/#ssh [tag]: https://docs.docker.com/reference/cli/docker/image/build/#tag [target]: https://docs.docker.com/reference/cli/docker/image/build/#target +[typeexpr]: https://github.com/hashicorp/hcl/tree/main/ext/typeexpr [userfunc]: https://github.com/hashicorp/hcl/tree/main/ext/userfunc diff --git a/_vendor/github.com/docker/buildx/docs/bake-stdlib.md b/_vendor/github.com/docker/buildx/docs/bake-stdlib.md new file mode 100644 index 00000000000..569b072bd30 --- /dev/null +++ b/_vendor/github.com/docker/buildx/docs/bake-stdlib.md @@ -0,0 +1,1554 @@ +--- +title: Bake standard library functions +--- + +<!---MARKER_STDLIB_START--> + +| Name | Description | +|:----------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [`absolute`](#absolute) | If the given number is negative then returns its positive equivalent, or otherwise returns the given number unchanged. | +| [`add`](#add) | Returns the sum of the two given numbers. | +| [`and`](#and) | Applies the logical AND operation to the given boolean values. | +| [`base64decode`](#base64decode) | Decodes a string containing a base64 sequence. | +| [`base64encode`](#base64encode) | Encodes a string to a base64 sequence. | +| [`basename`](#basename) | Returns the last element of a path. | +| [`bcrypt`](#bcrypt) | Computes a hash of the given string using the Blowfish cipher. | +| [`byteslen`](#byteslen) | Returns the total number of bytes in the given buffer. | +| [`bytesslice`](#bytesslice) | Extracts a subslice from the given buffer. | +| [`can`](#can) | Tries to evaluate the expression given in its first argument. | +| [`ceil`](#ceil) | Returns the smallest whole number that is greater than or equal to the given value. | +| [`chomp`](#chomp) | Removes one or more newline characters from the end of the given string. | +| [`chunklist`](#chunklist) | Splits a single list into multiple lists where each has at most the given number of elements. | +| [`cidrhost`](#cidrhost) | Calculates a full host IP address within a given IP network address prefix. | +| [`cidrnetmask`](#cidrnetmask) | Converts an IPv4 address prefix given in CIDR notation into a subnet mask address. | +| [`cidrsubnet`](#cidrsubnet) | Calculates a subnet address within a given IP network address prefix. 
| +| [`cidrsubnets`](#cidrsubnets) | Calculates many consecutive subnet addresses at once, rather than just a single subnet extension. | +| [`coalesce`](#coalesce) | Returns the first of the given arguments that isn't null, or raises an error if there are no non-null arguments. | +| [`coalescelist`](#coalescelist) | Returns the first of the given sequences that has a length greater than zero. | +| [`compact`](#compact) | Removes all empty string elements from the given list of strings. | +| [`concat`](#concat) | Concatenates together all of the given lists or tuples into a single sequence, preserving the input order. | +| [`contains`](#contains) | Returns true if the given value is a value in the given list, tuple, or set, or false otherwise. | +| [`convert`](#convert) | Converts a value to a specified type constraint, using HCL's customdecode extension for type expression support. | +| [`csvdecode`](#csvdecode) | Parses the given string as Comma Separated Values (as defined by RFC 4180) and returns a map of objects representing the table of data, using the first row as a header row to define the object attributes. | +| [`dirname`](#dirname) | Returns the directory of a path. | +| [`distinct`](#distinct) | Removes any duplicate values from the given list, preserving the order of remaining elements. | +| [`divide`](#divide) | Divides the first given number by the second. | +| [`element`](#element) | Returns the element with the given index from the given list or tuple, applying the modulo operation to the given index if it's greater than the number of elements. | +| [`equal`](#equal) | Returns true if the two given values are equal, or false otherwise. | +| [`flatten`](#flatten) | Transforms a list, set, or tuple value into a tuple by replacing any given elements that are themselves sequences with a flattened tuple of all of the nested elements concatenated together. | +| [`floor`](#floor) | Returns the greatest whole number that is less than or equal to the given value. | +| [`format`](#format) | Constructs a string by applying formatting verbs to a series of arguments, using a similar syntax to the C function \"printf\". | +| [`formatdate`](#formatdate) | Deprecated: use formattimestamp instead. Formats a timestamp given in RFC 3339 syntax into another timestamp in some other machine-oriented time syntax, as described in the format string. | +| [`formatlist`](#formatlist) | Constructs a list of strings by applying formatting verbs to a series of arguments, using a similar syntax to the C function \"printf\". | +| [`formattimestamp`](#formattimestamp) | Formats a timestamp string in RFC 3339 syntax or a unix timestamp integer into another timestamp in some other machine-oriented time syntax, as described in the format string. The special format string "X" returns the unix timestamp in seconds. | +| [`greaterthan`](#greaterthan) | Returns true if and only if the second number is greater than the first. | +| [`greaterthanorequalto`](#greaterthanorequalto) | Returns true if and only if the second number is greater than or equal to the first. | +| [`hasindex`](#hasindex) | Returns true if if the given collection can be indexed with the given key without producing an error, or false otherwise. | +| [`homedir`](#homedir) | Returns the current user's home directory. | +| [`indent`](#indent) | Adds a given number of spaces after each newline character in the given string. 
| +| [`index`](#index) | Returns the element with the given key from the given collection, or raises an error if there is no such element. | +| [`indexof`](#indexof) | Finds the element index for a given value in a list. | +| [`int`](#int) | Discards any fractional portion of the given number. | +| [`join`](#join) | Concatenates together the elements of all given lists with a delimiter, producing a single string. | +| [`jsondecode`](#jsondecode) | Parses the given string as JSON and returns a value corresponding to what the JSON document describes. | +| [`jsonencode`](#jsonencode) | Returns a string containing a JSON representation of the given value. | +| [`keys`](#keys) | Returns a list of the keys of the given map in lexicographical order. | +| [`length`](#length) | Returns the number of elements in the given collection. | +| [`lessthan`](#lessthan) | Returns true if and only if the second number is less than the first. | +| [`lessthanorequalto`](#lessthanorequalto) | Returns true if and only if the second number is less than or equal to the first. | +| [`log`](#log) | Returns the logarithm of the given number in the given base. | +| [`lookup`](#lookup) | Returns the value of the element with the given key from the given map, or returns the default value if there is no such element. | +| [`lower`](#lower) | Returns the given string with all Unicode letters translated to their lowercase equivalents. | +| [`max`](#max) | Returns the numerically greatest of all of the given numbers. | +| [`md5`](#md5) | Computes the MD5 hash of a given string and encodes it with hexadecimal digits. | +| [`merge`](#merge) | Merges all of the elements from the given maps into a single map, or the attributes from given objects into a single object. | +| [`min`](#min) | Returns the numerically smallest of all of the given numbers. | +| [`modulo`](#modulo) | Divides the first given number by the second and then returns the remainder. | +| [`multiply`](#multiply) | Returns the product of the two given numbers. | +| [`negate`](#negate) | Multiplies the given number by -1. | +| [`not`](#not) | Applies the logical NOT operation to the given boolean value. | +| [`notequal`](#notequal) | Returns false if the two given values are equal, or true otherwise. | +| [`or`](#or) | Applies the logical OR operation to the given boolean values. | +| [`parseint`](#parseint) | Parses the given string as a number of the given base, or raises an error if the string contains invalid characters. | +| [`pow`](#pow) | Returns the given number raised to the given power (exponentiation). | +| [`range`](#range) | Returns a list of numbers spread evenly over a particular range. | +| [`regex`](#regex) | Applies the given regular expression pattern to the given string and returns information about a single match, or raises an error if there is no match. | +| [`regex_replace`](#regex_replace) | Applies the given regular expression pattern to the given string and replaces all matches with the given replacement string. | +| [`regexall`](#regexall) | Applies the given regular expression pattern to the given string and returns a list of information about all non-overlapping matches, or an empty list if there are no matches. | +| [`replace`](#replace) | Replaces all instances of the given substring in the given string with the given replacement string. | +| [`reverse`](#reverse) | Returns the given string with all of its Unicode characters in reverse order. 
| +| [`reverselist`](#reverselist) | Returns the given list with its elements in reverse order. | +| [`rsadecrypt`](#rsadecrypt) | Decrypts an RSA-encrypted ciphertext. | +| [`sanitize`](#sanitize) | Replaces all non-alphanumeric characters with a underscore, leaving only characters that are valid for a Bake target name. | +| [`semvercmp`](#semvercmp) | Returns true if version satisfies a constraint. | +| [`sethaselement`](#sethaselement) | Returns true if the given set contains the given element, or false otherwise. | +| [`setintersection`](#setintersection) | Returns the intersection of all given sets. | +| [`setproduct`](#setproduct) | Calculates the cartesian product of two or more sets. | +| [`setsubtract`](#setsubtract) | Returns the relative complement of the two given sets. | +| [`setsymmetricdifference`](#setsymmetricdifference) | Returns the symmetric difference of the two given sets. | +| [`setunion`](#setunion) | Returns the union of all given sets. | +| [`sha1`](#sha1) | Computes the SHA1 hash of a given string and encodes it with hexadecimal digits. | +| [`sha256`](#sha256) | Computes the SHA256 hash of a given string and encodes it with hexadecimal digits. | +| [`sha512`](#sha512) | Computes the SHA512 hash of a given string and encodes it with hexadecimal digits. | +| [`signum`](#signum) | Returns 0 if the given number is zero, 1 if the given number is positive, or -1 if the given number is negative. | +| [`slice`](#slice) | Extracts a subslice of the given list or tuple value. | +| [`sort`](#sort) | Applies a lexicographic sort to the elements of the given list. | +| [`split`](#split) | Produces a list of one or more strings by splitting the given string at all instances of a given separator substring. | +| [`strlen`](#strlen) | Returns the number of Unicode characters (technically: grapheme clusters) in the given string. | +| [`substr`](#substr) | Extracts a substring from the given string. | +| [`subtract`](#subtract) | Returns the difference between the two given numbers. | +| [`timeadd`](#timeadd) | Adds the duration represented by the given duration string to the given RFC 3339 timestamp string, returning another RFC 3339 timestamp. | +| [`timestamp`](#timestamp) | Returns a string representation of the current date and time. | +| [`title`](#title) | Replaces one letter after each non-letter and non-digit character with its uppercase equivalent. | +| [`trim`](#trim) | Removes consecutive sequences of characters in "cutset" from the start and end of the given string. | +| [`trimprefix`](#trimprefix) | Removes the given prefix from the start of the given string, if present. | +| [`trimspace`](#trimspace) | Removes any consecutive space characters (as defined by Unicode) from the start and end of the given string. | +| [`trimsuffix`](#trimsuffix) | Removes the given suffix from the start of the given string, if present. | +| [`try`](#try) | Variadic function that tries to evaluate all of is arguments in sequence until one succeeds, in which case it returns that result, or returns an error if none of them succeed. | +| [`unixtimestampparse`](#unixtimestampparse) | Given a unix timestamp integer, will parse and return an object representation of that date and time. A unix timestamp is the number of seconds elapsed since January 1, 1970 UTC. | +| [`upper`](#upper) | Returns the given string with all Unicode letters translated to their uppercase equivalents. | +| [`urlencode`](#urlencode) | Applies URL encoding to a given string. 
| +| [`uuidv4`](#uuidv4) | Generates and returns a Type-4 UUID in the standard hexadecimal string format. | +| [`uuidv5`](#uuidv5) | Generates and returns a Type-5 UUID in the standard hexadecimal string format. | +| [`values`](#values) | Returns the values of elements of a given map, or the values of attributes of a given object, in lexicographic order by key or attribute name. | +| [`zipmap`](#zipmap) | Constructs a map from a list of keys and a corresponding list of values, which must both be of the same length. | + + +<!---MARKER_STDLIB_END--> + +## `absolute` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + val = "${absolute(-42)}" # => 42 + } +} +``` + +## `add` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${add(123, 1)}" # => 124 + } +} +``` + +## `and` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${and(true, false)}" # => false + } +} +``` + +## `base64decode` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + decoded = "${base64decode("SGVsbG8=")}" # => "Hello" + } +} +``` + +## `base64encode` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + encoded = "${base64encode("Hello")}" # => "SGVsbG8=" + } +} +``` + +## `basename` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + file = "${basename("/usr/local/bin/docker")}" # => "docker" + } +} +``` + +## `bcrypt` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + hash = "${bcrypt("mypassword")}" # => "$2a$10$..." 
+ } +} +``` + +## `byteslen` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + length = "${byteslen("Docker")}" # => 6 + } +} +``` + +## `bytesslice` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + part = "${bytesslice("Docker", 0, 3)}" # => "Doc" + } +} +``` + +## `can` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + safe = "${can(parseint("123", 10))}" # => true + } +} +``` + +## `ceil` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + rounded = "${ceil(3.14)}" # => 4" + } +} +``` + +## `chomp` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${chomp("Hello\n\n")}" # => "Hello" + } +} +``` + +## `chunklist` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${chunklist([1,2,3,4,5], 2)}" # => [[1,2],[3,4],[5]] + } +} +``` + +## `cidrhost` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${cidrhost("10.0.0.0/16", 5)}" # => "10.0.0.5" + } +} +``` + +## `cidrnetmask` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + mask = "${cidrnetmask("10.0.0.0/16")}" # => "255.255.0.0" + } +} +``` + +## `cidrsubnet` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + subnet = "${cidrsubnet("10.0.0.0/16", 4, 2)}" # => "10.0.32.0/20" + } +} +``` + +## `cidrsubnets` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + subs = "${cidrsubnets("10.0.0.0/16", 4, 4)}" # => ["10.0.0.0/20","10.0.16.0/20",...] 
+ } +} +``` + +## `coalesce` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + first = "${coalesce(null, "", "docker")}" # => "docker" + } +} +``` + +## `coalescelist` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + first = "${coalescelist([], [1,2], [3])}" # => [1,2] + } +} +``` + +## `compact` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + list = "${compact(["a", "", "b", ""])}" # => ["a","b"] + } +} +``` + +## `concat` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + list = "${concat([1,2],[3,4])}" # => [1,2,3,4] + } +} +``` + +## `contains` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + check = "${contains([1,2,3], 2)}" # => true + } +} +``` + +## `convert` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${convert("42", number)}" # => 42 + } +} +``` + +## `csvdecode` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + data = "${csvdecode("name,age\nAlice,30\nBob,40")}" # => [{"name":"Alice","age":"30"},{"name":"Bob","age":"40"}] + } +} +``` + +## `dirname` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + dir = "${dirname("/usr/local/bin/docker")}" # => "/usr/local/bin" + } +} +``` + +## `distinct` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${distinct([1,2,2,3,3,3])}" # => [1,2,3] + } +} +``` + +## `divide` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${divide(10, 2)}" # => 5 + } +} +``` + +## `element` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + val = "${element(["a","b","c"], 1)}" # => "b" + } +} +``` + +## `equal` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + check = "${equal(2, 2)}" # => true + } +} +``` + +## `flatten` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + flat = "${flatten([[1,2],[3,4],[5]])}" # => [1,2,3,4,5] + } +} +``` + +## `floor` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${floor(3.99)}" # => 3 + } +} +``` + +## `format` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${format("Hello, %s!", "World")}" # => "Hello, World!" + } +} +``` + +## `formatdate` + +> [!WARNING] +> Deprecated: use `formattimestamp` instead. 
`formatdate` only accepts RFC3339 +> timestamp strings. + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + date = "${formatdate("YYYY-MM-DD", "2025-09-16T12:00:00Z")}" # => "2025-09-16" + } +} +``` + +## `formattimestamp` + +Formats either an RFC3339 timestamp string or a unix timestamp integer. +The special format `X` returns the unix timestamp in seconds. + +```hcl +# docker-bake.hcl +variable "SOURCE_DATE_EPOCH" { + type = number + default = formattimestamp("X", "2015-10-21T00:00:00Z") # => 1445385600 +} + +target "default" { + dockerfile = "Dockerfile" + labels = { + "org.opencontainers.image.created" = formattimestamp("YYYY-MM-DD'T'hh:mm:ssZ", SOURCE_DATE_EPOCH) # => "2015-10-21T00:00:00Z" + } + args = { + build_date = formattimestamp("YYYY-MM-DD", "2025-09-16T12:00:00Z") # => "2025-09-16" + } +} +``` + +## `formatlist` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + list = "${formatlist("Hi %s", ["Alice", "Bob"])}" # => ["Hi Alice","Hi Bob"] + } +} +``` + +## `greaterthan` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${greaterthan(2, 5)}" # => true + } +} +``` + +## `greaterthanorequalto` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${greaterthanorequalto(5, 5)}" # => true + } +} +``` + +## `hasindex` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + exists = "${hasindex([10, 20, 30], 1)}" # => true + missing = "${hasindex([10, 20, 30], 5)}" # => false + } +} +``` + +## `homedir` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + home = "${homedir()}" # => e.g., "/home/user" + } +} +``` + +## `indent` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + text = "${indent(4, "Hello\nWorld")}" + # => " Hello\n World" + } +} +``` + +## `index` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + val = "${index({foo = "bar", baz = "qux"}, "baz")}" # => "qux" + } +} +``` + +## `indexof` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + pos = "${indexof(["a","b","c"], "b")}" # => 1 + } +} +``` + +## `int` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + number = "${int(3.75)}" # => 3 + } +} +``` + +## `join` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + csv = "${join(",", ["a","b","c"])}" # => "a,b,c" + } +} +``` + +## `jsondecode` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + obj = "${jsondecode("{\"name\":\"Docker\",\"stars\":5}")}" # => {"name":"Docker","stars":5} + } +} +``` + +## `jsonencode` + +```hcl +# docker-bake.hcl 
+target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + str = "${jsonencode({name="Docker", stars=5})}" # => "{\"name\":\"Docker\",\"stars\":5}" + } +} +``` + +## `keys` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + list = "${keys({foo = 1, bar = 2, baz = 3})}" + # => ["bar","baz","foo"] (sorted order) + } +} +``` + +## `length` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + size = "${length([1,2,3,4])}" # => 4 + } +} +``` + +## `lessthan` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${lessthan(10, 3)}" # => false + } +} +``` + +## `lessthanorequalto` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${lessthanorequalto(5, 7)}" # => true + } +} +``` + +## `log` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + val = "${log(100, 10)}" # => 2 + } +} +``` + +## `lookup` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + found = "${lookup({a="apple", b="banana"}, "a", "none")}" # => "apple" + fallback = "${lookup({a="apple", b="banana"}, "c", "none")}" # => "none" + } +} +``` +## `lower` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + val = "${lower("HELLO")}" # => "hello" + } +} +``` + +## `max` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${max(3, 9, 7)}" # => 9 + } +} +``` + +## `md5` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + hash = "${md5("docker")}" # => "597dd5f6a..." 
(hex string) + } +} +``` + +## `merge` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + combined = "${merge({a=1, b=2}, {b=3, c=4})}" # => {a=1, b=3, c=4} + } +} +``` + +## `min` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${min(3, 9, 7)}" # => 3 + } +} +``` + +## `modulo` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${modulo(10, 3)}" # => 1 + } +} +``` + +## `multiply` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${multiply(6, 7)}" # => 42 + } +} +``` + +## `negate` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${negate(7)}" # => -7 + } +} +``` + +## `not` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${not(true)}" # => false + } +} +``` + +## `notequal` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${notequal(4, 5)}" # => true + } +} +``` + +## `or` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${or(true, false)}" # => true + } +} +``` + +## `parseint` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${parseint("ff", 16)}" # => 255 + } +} +``` + +## `pow` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${pow(3, 2)}" # => 9 + } +} +``` + +## `range` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${range(0, 5)}" # => [0,1,2,3,4] + } +} +``` + +## `regex` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${regex("h.llo", "hello")}" # => "hello" + } +} +``` + +## `regex_replace` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${regex_replace("[0-9]+", "abc123xyz", "NUM")}" # => "abcNUMxyz" + } +} +``` + +## `regexall` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = matches = "${regexall("[a-z]+", "abc123xyz")}" # => ["abc","xyz"] + } +} +``` + +## `replace` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${replace("banana", "na", "--")}" # => "ba-- --" + } +} +``` + +## `reverse` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${reverse("stressed")}" # => "desserts" + } +} +``` + +## 
`reverselist` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${reverselist([1,2,3])}" # => [3,2,1] + } +} +``` + +## `rsadecrypt` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${rsadecrypt("eczGaDhXDbOFRZ...", "MIIEowIBAAKCAQEAgUElV5...")}" + } +} +``` + +## `sanitize` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${sanitize("My App! v1.0")}" # => "My_App__v1_0" + } +} +``` + +## `semvercmp` + +This function checks if a semantic version fits within a set of constraints. +See [Checking Version Constraints](https://github.com/Masterminds/semver?tab=readme-ov-file#checking-version-constraints) +for details. + +```hcl +# docker-bake.hcl +variable "ALPINE_VERSION" { + default = "3.23" +} + +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + platforms = semvercmp(ALPINE_VERSION, ">= 3.20") ? [ + "linux/amd64", + "linux/arm64", + "linux/riscv64" + ] : [ + "linux/amd64", + "linux/arm64" + ] +} +``` + +## `sethaselement` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${sethaselement([1,2,3], 2)}" # => true + } +} +``` + +## `setintersection` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${setintersection(["a","b","c"], ["b","c","d"])}" # => ["b","c"] + } +} +``` + +## `setproduct` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${setproduct(["x","y"], [1,2])}" # => [["x",1],["x",2],["y",1],["y",2]] + } +} +``` + +## `setsubtract` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${setsubtract([1,2,3], [2])}" # => [1,3] + } +} +``` + +## `setsymmetricdifference` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${setsymmetricdifference([1,2,3], [3,4])}" # => [1,2,4] + } +} +``` + +## `setunion` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${setunion(["a","b"], ["b","c"])}" # => ["a","b","c"] + } +} +``` + +## `sha1` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${sha1("hello")}" # => "aaf4c61d..." (hex) + } +} +``` + +## `sha256` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${sha256("hello")}" # => "2cf24dba..." (hex) + } +} +``` + +## `sha512` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${sha512("hello")}" # => "9b71d224..." 
(hex) + } +} +``` + +## `signum` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + zero = "${signum(0)}" # => 0 + pos = "${signum(12)}" # => 1 + neg = "${signum(-12)}" # => -1 + } +} +``` + +## `slice` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${slice(["a","b","c","d"], 1, 3)}" # => ["b","c"] + } +} +``` + +## `sort` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${sort(["b","c","a"])}" # => ["a","b","c"] + } +} +``` + +## `split` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${split(",", "a,b,c")}" # => ["a","b","c"] + } +} +``` + +## `strlen` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${strlen("Docker")}" # => 6 + } +} +``` + +## `substr` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${substr("abcdef", 2, 3)}" # => "cde" + } +} +``` + +## `subtract` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${subtract(10, 3)}" # => 7 + } +} +``` + +## `timeadd` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${timeadd("2025-09-24T12:00:00Z", "1h30m")}" # => "2025-09-24T13:30:00Z" + } +} +``` + +## `timestamp` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${timestamp()}" # => current RFC3339 time at evaluation + } +} +``` + +## `title` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${title("hello world-from_docker")}" # => "Hello World-From_Docker" + } +} +``` + +## `trim` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${trim("--hello--", "-")}" # => "hello" + } +} +``` + +## `trimprefix` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${trimprefix("docker-build", "docker-")}" # => "build" + } +} +``` + +## `trimspace` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${trimspace(" hello ")}" # => "hello" + } +} +``` + +## `trimsuffix` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${trimsuffix("filename.txt", ".txt")}" # => "filename" + } +} +``` + +## `try` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + # First expr errors (invalid hex), second succeeds → returns 255 + val1 = "${try(parseint("zz", 16), 
parseint("ff", 16))}" # => 255 + + # First expr errors (missing key), fallback string is used + val2 = "${try(index({a="apple"}, "b"), "fallback")}" # => "fallback" + } +} +``` + +## `unixtimestampparse` + +The returned object has the following attributes: +* `year` (Number) The year for the unix timestamp. +* `year_day` (Number) The day of the year for the unix timestamp, in the range 1-365 for non-leap years, and 1-366 in leap years. +* `day` (Number) The day of the month for the unix timestamp. +* `month` (Number) The month of the year for the unix timestamp. +* `month_name` (String) The name of the month for the unix timestamp (ex. "January"). +* `weekday` (Number) The day of the week for the unix timestamp. +* `weekday_name` (String) The name of the day for the unix timestamp (ex. "Sunday"). +* `hour` (Number) The hour within the day for the unix timestamp, in the range 0-23. +* `minute` (Number) The minute offset within the hour for the unix timestamp, in the range 0-59. +* `second` (Number) The second offset within the minute for the unix timestamp, in the range 0-59. +* `rfc3339` (String) The RFC3339 format string. +* `iso_year` (Number) The ISO 8601 year number. +* `iso_week` (Number) The ISO 8601 week number. + +```hcl +# docker-bake.hcl +variable "SOURCE_DATE_EPOCH" { + type = number + default = 1690328596 +} + +target "default" { + args = { + SOURCE_DATE_EPOCH = SOURCE_DATE_EPOCH + } + labels = { + "org.opencontainers.image.created" = unixtimestampparse(SOURCE_DATE_EPOCH).rfc3339 + } +} +``` + +## `upper` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + result = "${upper("hello")}" # => "HELLO" + } +} +``` + +## `urlencode` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + url = "${urlencode("a b=c&d")}" # => "a+b%3Dc%26d" + } +} +``` + +## `uuidv4` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + id = "${uuidv4()}" # => random v4 UUID each evaluation + } +} +``` + +## `uuidv5` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + # Uses the DNS namespace UUID per RFC 4122 + # "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + id = "${uuidv5("6ba7b810-9dad-11d1-80b4-00c04fd430c8", "example.com")}" + # => always "9073926b-929f-31c2-abc9-fad77ae3e8eb" for "example.com" + } +} +``` + +## `values` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + vals = "${values({a=1, c=3, b=2})}" # => [1,2,3] (ordered by key: a,b,c) + } +} +``` + +## `zipmap` + +```hcl +# docker-bake.hcl +target "webapp-dev" { + dockerfile = "Dockerfile.webapp" + tags = ["docker.io/username/webapp:latest"] + args = { + obj = "${zipmap(["name","stars"], ["Docker", 5])}" # => {name="Docker", stars=5} + } +} +``` diff --git a/_vendor/github.com/docker/cli/docs/deprecated.md b/_vendor/github.com/docker/cli/docs/deprecated.md index 30fe60f0ea2..4271bbf0ac7 100644 --- a/_vendor/github.com/docker/cli/docs/deprecated.md +++ b/_vendor/github.com/docker/cli/docs/deprecated.md @@ -53,16 +53,22 @@ The following table provides an overview of the current status of deprecated fea | Status | Feature | Deprecated | Remove | 
|------------|------------------------------------------------------------------------------------------------------------------------------------|------------|--------| +| Deprecated | [Support for cgroup v1](#support-for-cgroup-v1) | v29.0 | - | +| Deprecated | [`--pause` option on `docker commit`](#--pause-option-on-docker-commit) | v29.0 | v30.0 | +| Deprecated | [Legacy links environment variables](#legacy-links-environment-variables) | v28.4 | v30.0 | +| Deprecated | [Special handling for quoted values for TLS flags](#special-handling-for-quoted-values-for-tls-flags) | v28.4 | v29.0 | +| Deprecated | [Empty/nil fields in image Config from inspect API](#emptynil-fields-in-image-config-from-inspect-api) | v28.3 | v29.0 | | Deprecated | [Configuration for pushing non-distributable artifacts](#configuration-for-pushing-non-distributable-artifacts) | v28.0 | v29.0 | | Deprecated | [`--time` option on `docker stop` and `docker restart`](#--time-option-on-docker-stop-and-docker-restart) | v28.0 | - | -| Deprecated | [Non-standard fields in image inspect](#non-standard-fields-in-image-inspect) | v27.0 | v28.0 | +| Removed | [Non-standard fields in image inspect](#non-standard-fields-in-image-inspect) | v27.0 | v28.2 | | Removed | [API CORS headers](#api-cors-headers) | v27.0 | v28.0 | -| Deprecated | [Graphdriver plugins (experimental)](#graphdriver-plugins-experimental) | v27.0 | v28.0 | +| Removed | [Graphdriver plugins (experimental)](#graphdriver-plugins-experimental) | v27.0 | v28.0 | | Deprecated | [Unauthenticated TCP connections](#unauthenticated-tcp-connections) | v26.0 | v28.0 | -| Deprecated | [`Container` and `ContainerConfig` fields in Image inspect](#container-and-containerconfig-fields-in-image-inspect) | v25.0 | v26.0 | -| Deprecated | [Deprecate legacy API versions](#deprecate-legacy-api-versions) | v25.0 | v26.0 | +| Removed | [`Container` and `ContainerConfig` fields in Image inspect](#container-and-containerconfig-fields-in-image-inspect) | v25.0 | v26.0 | +| Removed | [Deprecate legacy API versions](#deprecate-legacy-api-versions) | v25.0 | v26.0 | | Removed | [Container short ID in network Aliases field](#container-short-id-in-network-aliases-field) | v25.0 | v26.0 | -| Deprecated | [IsAutomated field, and `is-automated` filter on `docker search`](#isautomated-field-and-is-automated-filter-on-docker-search) | v25.0 | v26.0 | +| Removed | [Mount `bind-nonrecursive` option](#mount-bind-nonrecursive-option) | v25.0 | v29.0 | +| Removed | [IsAutomated field, and `is-automated` filter on `docker search`](#isautomated-field-and-is-automated-filter-on-docker-search) | v25.0 | v28.2 | | Removed | [logentries logging driver](#logentries-logging-driver) | v24.0 | v25.0 | | Removed | [OOM-score adjust for the daemon](#oom-score-adjust-for-the-daemon) | v24.0 | v25.0 | | Removed | [BuildKit build information](#buildkit-build-information) | v23.0 | v24.0 | @@ -71,7 +77,7 @@ The following table provides an overview of the current status of deprecated fea | Removed | [Btrfs storage driver on CentOS 7 and RHEL 7](#btrfs-storage-driver-on-centos-7-and-rhel-7) | v20.10 | v23.0 | | Removed | [Support for encrypted TLS private keys](#support-for-encrypted-tls-private-keys) | v20.10 | v23.0 | | Removed | [Kubernetes stack and context support](#kubernetes-stack-and-context-support) | v20.10 | v23.0 | -| Deprecated | [Pulling images from non-compliant image registries](#pulling-images-from-non-compliant-image-registries) | v20.10 | - | +| Removed | [Pulling images from non-compliant image 
registries](#pulling-images-from-non-compliant-image-registries) | v20.10 | v28.2 | | Removed | [Linux containers on Windows (LCOW)](#linux-containers-on-windows-lcow-experimental) | v20.10 | v23.0 | | Deprecated | [BLKIO weight options with cgroups v1](#blkio-weight-options-with-cgroups-v1) | v20.10 | - | | Removed | [Kernel memory limit](#kernel-memory-limit) | v20.10 | v23.0 | @@ -80,9 +86,9 @@ The following table provides an overview of the current status of deprecated fea | Deprecated | [CLI plugins support](#cli-plugins-support) | v20.10 | - | | Deprecated | [Dockerfile legacy `ENV name value` syntax](#dockerfile-legacy-env-name-value-syntax) | v20.10 | - | | Removed | [`docker build --stream` flag (experimental)](#docker-build---stream-flag-experimental) | v20.10 | v20.10 | -| Deprecated | [`fluentd-async-connect` log opt](#fluentd-async-connect-log-opt) | v20.10 | v28.0 | +| Removed | [`fluentd-async-connect` log opt](#fluentd-async-connect-log-opt) | v20.10 | v28.0 | | Removed | [Configuration options for experimental CLI features](#configuration-options-for-experimental-cli-features) | v19.03 | v23.0 | -| Deprecated | [Pushing and pulling with image manifest v2 schema 1](#pushing-and-pulling-with-image-manifest-v2-schema-1) | v19.03 | v27.0 | +| Removed | [Pushing and pulling with image manifest v2 schema 1](#pushing-and-pulling-with-image-manifest-v2-schema-1) | v19.03 | v28.2 | | Removed | [`docker engine` subcommands](#docker-engine-subcommands) | v19.03 | v20.10 | | Removed | [Top-level `docker deploy` subcommand (experimental)](#top-level-docker-deploy-subcommand-experimental) | v19.03 | v20.10 | | Removed | [`docker stack deploy` using "dab" files (experimental)](#docker-stack-deploy-using-dab-files-experimental) | v19.03 | v20.10 | @@ -120,10 +126,127 @@ The following table provides an overview of the current status of deprecated fea | Removed | [`--run` flag on `docker commit`](#--run-flag-on-docker-commit) | v0.10 | v1.13 | | Removed | [Three arguments form in `docker import`](#three-arguments-form-in-docker-import) | v0.6.7 | v1.12 | -## Configuration for pushing non-distributable artifacts +### Support for cgroup v1 -**Deprecated in Release: v28.0** -**Target For Removal In Release: v29.0** +**Deprecated in release: v29.0** + +Support for cgroup v1 is deprecated in the v29.0 release, however, it will continue +to be supported until May 2029. +The latest release in May 2029 may not necessarily support cgroup v1, +but there will be at least one maintained branch with the support for cgroup v1. + +The cgroup version currently in use can be checked by running the `docker info` command: + +```console +$ docker info +<...> +Server: + <...> + Cgroup Version: 2 + <...> +``` + +### `--pause` option on `docker commit` + +**Deprecated in release: v29.0** + +**Target for removal in release: v30.0** + +The `--pause` option is enabled by default since Docker v1.1.0 to prevent +committing containers in an inconsistent state, but can be disabled by +setting the `--pause=false` option. In docker CLI v29.0 this flag is +replaced by a `--no-pause` flag instead. The `--pause` option is still +functional in the v29.0 release, printing a deprecation warning, but +will be removed in docker CLI v30. + +### Legacy links environment variables + +**Deprecated in release: v28.4** + +**Disabled by default in release: v29.0** + +**Target for removal in release: v30.0** + +Containers attached to the default bridge network can specify "legacy links" (e.g. 
+using `--links` on the CLI) to get access to other containers attached to that +network. The linking container (i.e., the container created with `--links`) automatically +gets environment variables that specify the IP address and port mappings of the linked +container. However, these environment variables are prefixed with the linked +container's names, making them impractical. + +Starting with Docker v29.0, these environment variables are no longer set by +default. Users who still depend on them can start Docker Engine with the +environment variable `DOCKER_KEEP_DEPRECATED_LEGACY_LINKS_ENV_VARS=1` set. + +Support for legacy links environment variables, as well as the `DOCKER_KEEP_DEPRECATED_LEGACY_LINKS_ENV_VARS` +will be removed in Docker Engine v30.0. + +### Special handling for quoted values for TLS flags + +**Deprecated in release: v28.4** + +**Target for removal in release: v29.0** + +The `--tlscacert`, `--tlscert`, and `--tlskey` command-line flags had +non-standard behavior for handling values contained in quotes (`"` or `'`). +Normally, quotes are handled by the shell, for example, in the following +example, the shell takes care of handling quotes before passing the values +to the `docker` CLI: + +```console +docker --some-option "some-value-in-quotes" ... +``` + +However, when passing values using an equal sign (`=`), this may not happen +and values may be handled including quotes; + +```console +docker --some-option="some-value-in-quotes" ... +``` + +This caused issues with "Docker Machine", which used this format as part +of its `docker-machine config` output, and the CLI carried special, non-standard +handling for these flags. + +Docker Machine reached EOL, and this special handling made the processing +of flag values inconsistent with other flags used, so this behavior is +deprecated. Users depending on this behavior are recommended to specify +the quoted values using a space between the flag and its value, as illustrated +above. + +### Empty/nil fields in image Config from inspect API + +**Deprecated in release: v28.3** + +**Target for removal in release: v29.0** + +The `Config` field returned by `docker image inspect` (and the `GET /images/{name}/json` +API endpoint) currently includes certain fields even when they are empty or nil. +Starting in Docker v29.0, the following fields will be omitted from the API response +when they contain empty or default values: + +- `Cmd` +- `Entrypoint` +- `Env` +- `Labels` +- `OnBuild` +- `User` +- `Volumes` +- `WorkingDir` + +Applications consuming the image inspect API should be updated to handle the +absence of these fields gracefully, treating missing fields as having their +default/empty values. + +For API version corresponding to Docker v29.0, these fields will be omitted when +empty. They will continue to be included when using clients that request an older +API version for backward compatibility. + +### Configuration for pushing non-distributable artifacts + +**Deprecated in release: v28.0** + +**Target for removal in release: v29.0** Non-distributable artifacts (also called foreign layers) were introduced in docker v1.12 to accommodate Windows images for which the EULA did not allow @@ -161,7 +284,7 @@ entirely. 
### `--time` option on `docker stop` and `docker restart` -**Deprecated in Release: v28.0** +**Deprecated in release: v28.0** The `--time` option for the `docker stop`, `docker container stop`, `docker restart`, and `docker container restart` commands has been renamed to `--timeout` for @@ -171,8 +294,9 @@ Users are encouraged to migrate to using the `--timeout` option instead. ### Non-standard fields in image inspect -**Deprecated in Release: v27.0** -**Target For Removal In Release: v28.0** +**Deprecated in release: v27.0** + +**Removed in release: v28.2** The `Config` field returned shown in `docker image inspect` (and as returned by the `GET /images/{name}/json` API endpoint) returns additional fields that are @@ -184,8 +308,9 @@ but are not omitted in the response when left empty. As these fields were not intended to be part of the image configuration response, they are deprecated, and will be removed from the API in thee next release. -The following fields are currently included in the API response, but are not -part of the underlying image's `Config` field, and deprecated: +The following fields are not part of the underlying image's `Config` field, and +removed in the API response for API v1.50 and newer, corresponding with v28.2. +They continue to be included when using clients that use an older API version: - `Hostname` - `Domainname` @@ -196,39 +321,37 @@ part of the underlying image's `Config` field, and deprecated: - `OpenStdin` - `StdinOnce` - `Image` -- `NetworkDisabled` (already omitted unless set) -- `MacAddress` (already omitted unless set) -- `StopTimeout` (already omitted unless set) +- `NetworkDisabled` (omitted unless set on older API versions) +- `MacAddress` (omitted unless set on older API versions) +- `StopTimeout` (omitted unless set on older API versions) [Docker image specification]: https://github.com/moby/docker-image-spec/blob/v1.3.1/specs-go/v1/image.go#L19-L32 [OCI image specification]: https://github.com/opencontainers/image-spec/blob/v1.1.0/specs-go/v1/config.go#L24-L62 ### Graphdriver plugins (experimental) -**Deprecated in Release: v27.0** -**Disabled by default in Release: v27.0** -**Target For Removal In Release: v28.0** +**Deprecated in**: v27.0**. + +**Disabled by default in release: v27.0** + +**Target for removal in release: v28.0** [Graphdriver plugins](https://github.com/docker/cli/blob/v26.1.4/docs/extend/plugins_graphdriver.md) -are an experimental feature that allow extending the Docker Engine with custom +were an experimental feature that allowed extending the Docker Engine with custom storage drivers for storing images and containers. This feature was not -maintained since its inception, and will no longer be supported in upcoming -releases. +maintained since its inception. -Support for graphdriver plugins is disabled by default in v27.0, and will be -removed v28.0. An `DOCKERD_DEPRECATED_GRAPHDRIVER_PLUGINS` environment variable -is provided in v27.0 to re-enable the feature. This environment variable must -be set to a non-empty value in the daemon's environment. - -The `DOCKERD_DEPRECATED_GRAPHDRIVER_PLUGINS` environment variable, along with -support for graphdriver plugins, will be removed in v28.0. Users of this feature -are recommended to instead configure the Docker Engine to use the [containerd image store](https://docs.docker.com/storage/containerd/) +Support for graphdriver plugins was disabled by default in v27.0, and removed +in v28.0. 
Users of this feature are recommended to instead configure the Docker +Engine to use the [containerd image store](https://docs.docker.com/storage/containerd/) and a custom [snapshotter](https://github.com/containerd/containerd/tree/v1.7.18/docs/snapshotters) ### API CORS headers -**Deprecated in Release: v27.0** -**Disabled by default in Release: v27.0** +**Deprecated in release: v27.0** + +**Disabled by default in release: v27.0** + **Removed in release: v28.0** The `api-cors-header` configuration option for the Docker daemon is insecure, @@ -248,8 +371,9 @@ If you need to access the API through a browser, use a reverse proxy. ### Unauthenticated TCP connections -**Deprecated in Release: v26.0** -**Target For Removal In Release: v28.0** +**Deprecated in release: v26.0** + +**Target for removal in release: v28.0** Configuring the Docker daemon to listen on a TCP address will require mandatory TLS verification. This change aims to ensure secure communication by preventing @@ -275,21 +399,23 @@ configuring TLS (or SSH) for the Docker daemon, refer to ### `Container` and `ContainerConfig` fields in Image inspect -**Deprecated in Release: v25.0** -**Target For Removal In Release: v26.0** +**Deprecated in release: v25.0** + +**Removed in release: v26.0** The `Container` and `ContainerConfig` fields returned by `docker inspect` are mostly an implementation detail of the classic (non-BuildKit) image builder. These fields are not portable and are empty when using the BuildKit-based builder (enabled by default since v23.0). -These fields are deprecated in v25.0 and will be omitted starting from v26.0. -If image configuration of an image is needed, you can obtain it from the -`Config` field. +These fields are deprecated in v25.0 and are omitted starting from v26.0 ( +API version v1.45 and up). If image configuration of an image is needed, +you can obtain it from the `Config` field. ### Deprecate legacy API versions -**Deprecated in Release: v25.0** -**Target For Removal In Release: v26.0** +**Deprecated in release: v25.0** + +**Target for removal in release: v26.0** The Docker daemon provides a versioned API for backward compatibility with old clients. Docker clients can perform API-version negotiation to select the most @@ -326,25 +452,28 @@ Error response from daemon: client version 1.23 is too old. Minimum supported AP upgrade your client to a newer version ``` +Support for API versions lower than `1.24` has been permanently removed in Docker +Engine v26, and the minimum supported API version will be incrementally raised +in releases following that. + +<!-- keeping the paragraphs below for when we incrementally raise the minimum API version --> +<!-- An environment variable (`DOCKER_MIN_API_VERSION`) is introduced that allows re-enabling older API versions in the daemon. This environment variable must be set in the daemon's environment (for example, through a [systemd override file](https://docs.docker.com/config/daemon/systemd/)), and the specified -API version must be supported by the daemon (`1.12` or higher on Linux, or -`1.24` or higher on Windows). - -Support for API versions lower than `1.24` will be permanently removed in Docker -Engine v26, and the minimum supported API version will be incrementally raised -in releases following that. +API version must be supported by the daemon (`1.24` or higher). 
We do not recommend depending on the `DOCKER_MIN_API_VERSION` environment variable other than for exceptional cases where it's not possible to update old clients, and those clients must be supported. +--> ### Container short ID in network Aliases field -**Deprecated in Release: v25.0** -**Removed In Release: v26.0** +**Deprecated in release: v25.0** + +**Removed in release: v26.0** The `Aliases` field returned by `docker inspect` contains the container short ID once the container is started. This behavior is deprecated in v25.0 but @@ -356,10 +485,32 @@ A new field `DNSNames` containing the container name (if one was specified), the hostname, the network aliases, as well as the container short ID, has been introduced in v25.0 and should be used instead of the `Aliases` field. +### Mount `bind-nonrecursive` option + +**Deprecated in release: v25.0** + +**Removed in release: v29.0** + +The `bind-nonrecursive` option was replaced with the [`bind-recursive`] +option (see [cli-4316], [cli-4671]). The option was still accepted, but +printed a deprecation warning: + +```console +bind-nonrecursive is deprecated, use bind-recursive=disabled instead +``` + +In the v29.0 release, this warning has been removed, and using the option now returns an error. +Users should use the equivalent `bind-recursive=disabled` option instead. + +[`bind-recursive`]: https://docs.docker.com/engine/storage/bind-mounts/#recursive-mounts +[cli-4316]: https://github.com/docker/cli/pull/4316 +[cli-4671]: https://github.com/docker/cli/pull/4671 + ### IsAutomated field, and `is-automated` filter on `docker search` -**Deprecated in Release: v25.0** -**Target For Removal In Release: v26.0** +**Deprecated in release: v25.0** + +**Removed in release: v28.2** The `is_automated` field has been deprecated by Docker Hub's search API. Consequently, the `IsAutomated` field in image search will always be set @@ -368,12 +519,13 @@ results. The `AUTOMATED` column has been removed from the default `docker search` and `docker image search` output in v25.0, and the corresponding `IsAutomated` -templating option will be removed in v26.0. +templating option has been removed in v28.2. ### Logentries logging driver -**Deprecated in Release: v24.0** -**Removed in Release: v25.0** +**Deprecated in release: v24.0** + +**Removed in release: v25.0** The logentries service SaaS was shut down on November 15, 2022, rendering this logging driver non-functional. Users should no longer use this logging @@ -383,8 +535,9 @@ after upgrading. ### OOM-score adjust for the daemon -**Deprecated in Release: v24.0** -**Removed in Release: v25.0** +**Deprecated in release: v24.0** + +**Removed in release: v25.0** The `oom-score-adjust` option was added to prevent the daemon from being OOM-killed before other processes. This option was mostly added as a @@ -403,8 +556,9 @@ the daemon. ### BuildKit build information -**Deprecated in Release: v23.0** -**Removed in Release: v24.0** +**Deprecated in release: v23.0** + +**Removed in release: v24.0** [Build information](https://github.com/moby/buildkit/blob/v0.11/docs/buildinfo.md) structures have been introduced in [BuildKit v0.10.0](https://github.com/moby/buildkit/releases/tag/v0.10.0) @@ -415,7 +569,7 @@ information is also embedded into the image configuration if one is generated.
### Legacy builder for Linux images -**Deprecated in Release: v23.0** +**Deprecated in release: v23.0** Docker v23.0 now uses BuildKit by default to build Linux images, and uses the [Buildx](https://docs.docker.com/buildx/working-with-buildx/) CLI component for @@ -446,7 +600,7 @@ you to report issues in the [BuildKit issue tracker on GitHub](https://github.co ### Legacy builder fallback -**Deprecated in Release: v23.0** +**Deprecated in release: v23.0** [Docker v23.0 now uses BuildKit by default to build Linux images](#legacy-builder-for-linux-images), which requires the Buildx component to build images with BuildKit. There may be @@ -492,7 +646,7 @@ be possible in a future release. ### Btrfs storage driver on CentOS 7 and RHEL 7 -**Removed in Release: v23.0** +**Removed in release: v23.0** The `btrfs` storage driver on CentOS and RHEL was provided as a technology preview by CentOS and RHEL, but has been deprecated since the [Red Hat Enterprise Linux 7.4 release](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/storage_administration_guide/ch-btrfs), @@ -504,9 +658,9 @@ of Docker will no longer provide this driver. ### Support for encrypted TLS private keys -**Deprecated in Release: v20.10** +**Deprecated in release: v20.10** -**Removed in Release: v23.0** +**Removed in release: v23.0** Use of encrypted TLS private keys has been deprecated, and has been removed. Golang has deprecated support for legacy PEM encryption (as specified in @@ -520,8 +674,9 @@ to decrypt the private key, and store it un-encrypted to continue using it. ### Kubernetes stack and context support -**Deprecated in Release: v20.10** -**Removed in Release: v23.0** +**Deprecated in release: v20.10** + +**Removed in release: v23.0** Following the deprecation of [Compose on Kubernetes](https://github.com/docker/compose-on-kubernetes), support for Kubernetes in the `stack` and `context` commands has been removed from @@ -549,7 +704,9 @@ CLI configuration file are no longer used, and ignored. ### Pulling images from non-compliant image registries -**Deprecated in Release: v20.10** +**Deprecated in release: v20.10** + +**Removed in release: v28.2** Docker Engine v20.10 and up includes optimizations to verify if images in the local image cache need updating before pulling, preventing the Docker Engine @@ -559,7 +716,7 @@ image registry to conform to the [Open Container Initiative Distribution Specifi While most registries conform to the specification, we encountered some registries to be non-compliant, resulting in `docker pull` to fail. -As a temporary solution, Docker Engine v20.10 includes a fallback mechanism to +As a temporary solution, Docker Engine v20.10 added a fallback mechanism to allow `docker pull` to be functional when using a non-compliant registry. A warning message is printed in this situation: @@ -568,21 +725,19 @@ warning message is printed in this situation: pull by tag. This fallback is DEPRECATED, and will be removed in a future release. -The fallback is added to allow users to either migrate their images to a compliant -registry, or for these registries to become compliant. - -Note that this fallback only addresses failures on `docker pull`. Other commands, -such as `docker stack deploy`, or pulling images with `containerd` will continue -to fail. +The fallback was added to allow users to either migrate their images to a +compliant registry, or for these registries to become compliant. 
-Given that other functionality is still broken with these registries, we consider -this fallback a _temporary_ solution, and will remove the fallback in an upcoming -major release. +GitHub deprecated the legacy `docker.pkg.github.com` registry, and it was +[sunset on Feb 24th, 2025](https://github.blog/changelog/2025-01-23-legacy-docker-registry-closing-down/) +in favor of GitHub Container Registry (GHCR, ghcr.io), making this fallback +no longer needed. ### Linux containers on Windows (LCOW) (experimental) -**Deprecated in Release: v20.10** -**Removed in Release: v23.0** +**Deprecated in release: v20.10** + +**Removed in release: v23.0** The experimental feature to run Linux containers on Windows (LCOW) was introduced as a technical preview in Docker 17.09. While many enhancements were made after @@ -594,7 +749,7 @@ Developers who want to run Linux workloads on a Windows host are encouraged to u ### BLKIO weight options with cgroups v1 -**Deprecated in Release: v20.10** +**Deprecated in release: v20.10** Specifying blkio weight (`docker run --blkio-weight` and `docker run --blkio-weight-device`) is now marked as deprecated when using cgroups v1 because the corresponding features @@ -604,35 +759,23 @@ When using cgroups v2, the `--blkio-weight` options are implemented using ### Kernel memory limit -**Deprecated in Release: v20.10** -**Removed in Release: v23.0** +**Deprecated in release: v20.10** + +**Removed in release: v23.0** Specifying kernel memory limit (`docker run --kernel-memory`) is no longer supported because the [Linux kernel deprecated `kmem.limit_in_bytes` in v5.4](https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0b5adf44cae99b3ebcc7). -The OCI runtime specification now marks this option (as well as `--kernel-memory-tcp`) -as ["NOT RECOMMENDED"](https://github.com/opencontainers/runtime-spec/pull/1093), +The OCI runtime specification now marks this option as ["NOT RECOMMENDED"](https://github.com/opencontainers/runtime-spec/pull/1093), and OCI runtimes such as `runc` no longer support this option. -Docker API v1.42 and up now ignores this option when set. Older versions of the -API continue to accept the option, but depending on the OCI runtime used, may -take no effect. - -> [!NOTE] -> While not deprecated (yet) in Docker, the OCI runtime specification also -> deprecated the `memory.kmem.tcp.limit_in_bytes` option. When using `runc` as -> runtime, this option takes no effect. The Linux kernel did not explicitly -> deprecate this feature, and there is a tracking ticket in the `runc` issue -> tracker to determine if this option should be reinstated or if this was an -> oversight of the Linux kernel maintainers (see [opencontainers/runc#3174](https://github.com/opencontainers/runc/issues/3174)). -> -> The `memory.kmem.tcp.limit_in_bytes` option is only supported with cgroups v1, -> and not available on installations running with cgroups v2. This option is -> only supported by the API, and not exposed on the `docker` command-line. +The Docker API no longer handles the kernel-memory fields, and Docker CLI v29.0 +removes the `--kernel-memory` option. ### Classic Swarm and overlay networks using cluster store -**Deprecated in Release: v20.10** -**Removed in Release: v23.0** +**Deprecated in release: v20.10** + +**Removed in release: v23.0** Standalone ("classic") Swarm has been deprecated, and with that the use of overlay networks using an external key/value store. The corresponding`--cluster-advertise`, @@ -640,8 +783,9 @@ networks using an external key/value store. 
The corresponding`--cluster-advertis ### Support for legacy `~/.dockercfg` configuration files -**Deprecated in Release: v20.10** -**Removed in Release: v23.0** +**Deprecated in release: v20.10** + +**Removed in release: v23.0** The Docker CLI up until v1.7.0 used the `~/.dockercfg` file to store credentials after authenticating to a registry (`docker login`). Docker v1.7.0 replaced this @@ -656,9 +800,9 @@ been removed. ### Configuration options for experimental CLI features -**Deprecated in Release: v19.03** +**Deprecated in release: v19.03** -**Removed in Release: v23.0** +**Removed in release: v23.0** The `DOCKER_CLI_EXPERIMENTAL` environment variable and the corresponding `experimental` field in the CLI configuration file are deprecated. Experimental features are @@ -670,13 +814,13 @@ format. ### CLI plugins support -**Deprecated in Release: v20.10** +**Deprecated in release: v20.10** CLI Plugin API is now marked as deprecated. ### Dockerfile legacy `ENV name value` syntax -**Deprecated in Release: v20.10** +**Deprecated in release: v20.10** The Dockerfile `ENV` instruction allows values to be set using either `ENV name=value` or `ENV name value`. The latter (`ENV name value`) form can be ambiguous, for example, @@ -700,8 +844,9 @@ ENV ONE="" TWO="" THREE="world" ### `docker build --stream` flag (experimental) -**Deprecated in Release: v20.10** -**Removed in Release: v20.10** +**Deprecated in release: v20.10** + +**Removed in release: v20.10** Docker v17.07 introduced an experimental `--stream` flag on `docker build` which allowed the build-context to be incrementally sent to the daemon, instead of @@ -717,8 +862,9 @@ files. ### `fluentd-async-connect` log opt -**Deprecated in Release: v20.10** -**Removed in Release: v28.0** +**Deprecated in release: v20.10** + +**Removed in release: v28.0** The `--log-opt fluentd-async-connect` option for the fluentd logging driver is [deprecated in favor of `--log-opt fluentd-async`](https://github.com/moby/moby/pull/39086). @@ -729,15 +875,16 @@ fluent#New: AsyncConnect is now deprecated, use Async instead ``` Users are encouraged to use the `fluentd-async` option going forward, as support -for the old option will be removed in a future release. +for the old option has been removed. ### Pushing and pulling with image manifest v2 schema 1 -**Deprecated in Release: v19.03** +**Deprecated in release: v19.03** -**Disabled by default in Release: v26.0** +**Disabled by default in release: v26.0** + +**Removed in release: v28.2** -**Target For Removal In Release: v27.0** The image manifest [v2 schema 1](https://distribution.github.io/distribution/spec/deprecated-schema-v1/) and "Docker Image v1" formats were deprecated in favor of the @@ -748,28 +895,22 @@ formats. These legacy formats should no longer be used, and users are recommended to update images to use current formats, or to upgrade to more current images. Starting with Docker v26.0, pulling these images is disabled by default, and -produces an error when attempting to pull the image: +support has been removed in v28.2. Attempting to pull a legacy image now +produces an error: ```console $ docker pull ubuntu:10.04 Error response from daemon: -[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. +Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. 
Suggest the author of docker.io/library/ubuntu:10.04 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/ ``` -An environment variable (`DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE`) is -added in Docker v26.0 that allows re-enabling support for these image formats -in the daemon. This environment variable must be set to a non-empty value in -the daemon's environment (for example, through a [systemd override file](https://docs.docker.com/config/daemon/systemd/)). -Support for the `DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE` environment variable -will be removed in Docker v27.0 after which this functionality is removed permanently. - ### `docker engine` subcommands -**Deprecated in Release: v19.03** +**Deprecated in release: v19.03** -**Removed in Release: v20.10** +**Removed in release: v20.10** The `docker engine activate`, `docker engine check`, and `docker engine update` provided an alternative installation method to upgrade Docker Community engines @@ -782,9 +923,9 @@ standard package managers. ### Top-level `docker deploy` subcommand (experimental) -**Deprecated in Release: v19.03** +**Deprecated in release: v19.03** -**Removed in Release: v20.10** +**Removed in release: v20.10** The top-level `docker deploy` command (using the "Docker Application Bundle" (.dab) file format was introduced as an experimental feature in Docker 1.13 / @@ -793,9 +934,9 @@ subcommand. ### `docker stack deploy` using "dab" files (experimental) -**Deprecated in Release: v19.03** +**Deprecated in release: v19.03** -**Removed in Release: v20.10** +**Removed in release: v20.10** With no development being done on this feature, and no active use of the file format, support for the DAB file format and the top-level `docker deploy` command @@ -804,8 +945,9 @@ using compose files. ### Support for the `overlay2.override_kernel_check` storage option -**Deprecated in Release: v19.03** -**Removed in Release: v24.0** +**Deprecated in release: v19.03** + +**Removed in release: v24.0** This daemon configuration option disabled the Linux kernel version check used to detect if the kernel supported OverlayFS with multiple lower dirs, which is @@ -815,8 +957,9 @@ option was no longer used. ### AuFS storage driver -**Deprecated in Release: v19.03** -**Removed in Release: v24.0** +**Deprecated in release: v19.03** + +**Removed in release: v24.0** The `aufs` storage driver is deprecated in favor of `overlay2`, and has been removed in a Docker Engine v24.0. Users of the `aufs` storage driver must @@ -834,8 +977,9 @@ maintenance of the `aufs` storage driver. ### Legacy overlay storage driver -**Deprecated in Release: v18.09** -**Removed in Release: v24.0** +**Deprecated in release: v18.09** + +**Removed in release: v24.0** The `overlay` storage driver is deprecated in favor of the `overlay2` storage driver, which has all the benefits of `overlay`, without its limitations (excessive @@ -850,9 +994,11 @@ backported), there is no reason to keep maintaining the `overlay` storage driver ### Device mapper storage driver -**Deprecated in Release: v18.09** -**Disabled by default in Release: v23.0** -**Removed in Release: v25.0** +**Deprecated in release: v18.09** + +**Disabled by default in release: v23.0** + +**Removed in release: v25.0** The `devicemapper` storage driver is deprecated in favor of `overlay2`, and has been removed in Docker Engine v25.0. 
Users of the `devicemapper` storage driver @@ -868,9 +1014,9 @@ is no reason to continue maintenance of the `devicemapper` storage driver. ### Use of reserved namespaces in engine labels -**Deprecated in Release: v18.06** +**Deprecated in release: v18.06** -**Removed In Release: v20.10** +**Removed in release: v20.10** The namespaces `com.docker.*`, `io.docker.*`, and `org.dockerproject.*` in engine labels were always documented to be reserved, but there was never any enforcement. @@ -882,7 +1028,7 @@ use, and will error instead in v20.10 and above. **Disabled In Release: v17.12** -**Removed In Release: v19.03** +**Removed in release: v19.03** The `--disable-legacy-registry` flag was disabled in Docker 17.12 and will print an error when used. For this error to be printed, the flag itself is still present, @@ -890,9 +1036,9 @@ but hidden. The flag has been removed in Docker 19.03. ### Interacting with V1 registries -**Disabled By Default In Release: v17.06** +**Disabled by default in release: v17.06** -**Removed In Release: v17.12** +**Removed in release: v17.12** Version 1.8.3 added a flag (`--disable-legacy-registry=false`) which prevents the Docker daemon from `pull`, `push`, and `login` operations against v1 @@ -909,7 +1055,7 @@ start when set. ### Asynchronous `service create` and `service update` as default -**Deprecated In Release: v17.05** +**Deprecated in release: v17.05** **Disabled by default in release: [v17.10](https://github.com/docker/docker-ce/releases/tag/v17.10.0-ce)** @@ -923,9 +1069,9 @@ and `docker service scale` in Docker 17.10. ### `-g` and `--graph` flags on `dockerd` -**Deprecated In Release: v17.05** +**Deprecated in release: v17.05** -**Removed In Release: v23.0** +**Removed in release: v23.0** The `-g` or `--graph` flag for the `dockerd` or `docker daemon` command was used to indicate the directory in which to store persistent data and resource @@ -934,9 +1080,9 @@ flag. These flags were deprecated and hidden in v17.05, and removed in v23.0. ### Top-level network properties in NetworkSettings -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** +**Deprecated in release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** -**Target For Removal In Release: v17.12** +**Target for removal in release: v17.12** When inspecting a container, `NetworkSettings` contains top-level information about the default ("bridge") network; @@ -953,18 +1099,18 @@ information. ### `filter` option for `/images/json` endpoint -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** +**Deprecated in release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** -**Removed In Release: v20.10** +**Removed in release: v20.10** The `filter` option to filter the list of image by reference (name or name:tag) is now implemented as a regular filter, named `reference`. ### `repository:shortid` image references -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** +**Deprecated in release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** -**Removed In Release: v17.12** +**Removed in release: v17.12** The `repository:shortid` syntax for referencing images is very little used, collides with tag references, and can be confused with digest references. @@ -974,32 +1120,32 @@ in Docker 17.12. 
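As a quick illustration of the reference forms that remain supported in place of `repository:shortid` (the image name is arbitrary and the digest is a placeholder, not a real value):

```console
$ docker pull ubuntu:24.04            # tag reference
$ docker pull ubuntu@sha256:<digest>  # digest reference, using a placeholder digest
```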
### `docker daemon` subcommand -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** +**Deprecated in release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** -**Removed In Release: v17.12** +**Removed in release: v17.12** The daemon is moved to a separate binary (`dockerd`), and should be used instead. ### Duplicate keys with conflicting values in engine labels -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** +**Deprecated in release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** -**Removed In Release: v17.12** +**Removed in release: v17.12** When setting duplicate keys with conflicting values, an error will be produced, and the daemon will fail to start. ### `MAINTAINER` in Dockerfile -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** +**Deprecated in release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** `MAINTAINER` was an early very limited form of `LABEL` which should be used instead. ### API calls without a version -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** +**Deprecated in release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** -**Target For Removal In Release: v17.12** +**Target for removal in release: v17.12** API versions should be supplied to all API calls to ensure compatibility with future Engine versions. Instead of just requesting, for example, the URL @@ -1007,9 +1153,9 @@ future Engine versions. Instead of just requesting, for example, the URL ### Backing filesystem without `d_type` support for overlay/overlay2 -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** +**Deprecated in release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** -**Removed In Release: v17.12** +**Removed in release: v17.12** The overlay and overlay2 storage driver does not work as expected if the backing filesystem does not support `d_type`. For example, XFS does not support `d_type` @@ -1023,18 +1169,18 @@ Refer to [#27358](https://github.com/docker/docker/issues/27358) for details. ### `--automated` and `--stars` flags on `docker search` -**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** +**Deprecated in release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** -**Removed In Release: v20.10** +**Removed in release: v20.10** The `docker search --automated` and `docker search --stars` options are deprecated. Use `docker search --filter=is-automated=<true|false>` and `docker search --filter=stars=...` instead. ### `-h` shorthand for `--help` -**Deprecated In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** +**Deprecated in release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** -**Target For Removal In Release: v17.09** +**Target for removal in release: v17.09** The shorthand (`-h`) is less common than `--help` on Linux and cannot be used on all subcommands (due to it conflicting with, e.g. `-h` / `--hostname` on @@ -1043,58 +1189,58 @@ on all subcommands (due to it conflicting with, e.g. 
`-h` / `--hostname` on ### `-e` and `--email` flags on `docker login` -**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** +**Deprecated in release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** -**Removed In Release: [v17.06](https://github.com/docker/docker-ce/releases/tag/v17.06.0-ce)** +**Removed in release: [v17.06](https://github.com/docker/docker-ce/releases/tag/v17.06.0-ce)** The `docker login` no longer automatically registers an account with the target registry if the given username doesn't exist. Due to this change, the email flag is no longer required, and will be deprecated. ### Separator (`:`) of `--security-opt` flag on `docker run` -**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** +**Deprecated in release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** -**Target For Removal In Release: v17.06** +**Target for removal in release: v17.06** The flag `--security-opt` doesn't use the colon separator (`:`) anymore to divide keys and values, it uses the equal symbol (`=`) for consistency with other similar flags, like `--storage-opt`. ### Ambiguous event fields in API -**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** +**Deprecated in release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** The fields `ID`, `Status` and `From` in the events API have been deprecated in favor of a more rich structure. See the events API documentation for the new format. ### `-f` flag on `docker tag` -**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** +**Deprecated in release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** +**Removed in release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is no longer necessary to specify `-f` to move a tag from one image to another. Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use. ### HostConfig at API container start -**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** +**Deprecated in release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** +**Removed in release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** Passing an `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of defining it at container creation (`POST /containers/create`). ### `--before` and `--since` flags on `docker ps` -**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** +**Deprecated in release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** +**Removed in release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** The `docker ps --before` and `docker ps --since` options are deprecated. Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead. 
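A small sketch of the replacement filter syntax described above, assuming a container named `web` already exists on the host:

```console
$ docker ps --filter "before=web"
$ docker ps --filter "since=web"
```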
### Driver-specific log tags -**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** +**Deprecated in release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** +**Removed in release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** Log tags are now generated in a standard way across different logging drivers. Because of which, the driver specific log tag options `syslog-tag`, `gelf-tag` and @@ -1106,9 +1252,9 @@ $ docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}" ### Docker Content Trust ENV passphrase variables name change -**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** +**Deprecated in release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** +**Removed in release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** Since 1.9, Docker Content Trust Offline key has been renamed to Root key and the Tagging key has been renamed to Repository key. Due to this renaming, we're also changing the corresponding environment variables @@ -1117,25 +1263,25 @@ Since 1.9, Docker Content Trust Offline key has been renamed to Root key and the ### `/containers/(id or name)/copy` endpoint -**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** +**Deprecated in release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** +**Removed in release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** The endpoint `/containers/(id or name)/copy` is deprecated in favor of `/containers/(id or name)/archive`. ### LXC built-in exec driver -**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** +**Deprecated in release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** -**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** +**Removed in release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** The built-in LXC execution driver, the lxc-conf flag, and API fields have been removed. ### Old Command Line Options -**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** +**Deprecated in release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** -**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** +**Removed in release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** The flags `-d` and `--daemon` are deprecated. Use the separate `dockerd` binary instead. 
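A minimal sketch of the change, assuming the daemon is started manually rather than through an init system:

```console
# Removed: starting the daemon through the docker CLI
$ docker -d

# Start the standalone daemon binary instead
$ dockerd
```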
@@ -1179,34 +1325,34 @@ The following double-dash options are deprecated and have no replacement: - `docker ps --before-id` - `docker search --trusted` -**Deprecated In Release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)** +**Deprecated in release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)** -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** +**Removed in release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** The single-dash (`-help`) was removed, in favor of the double-dash `--help` ### `--api-enable-cors` flag on `dockerd` -**Deprecated In Release: [v1.6.0](https://github.com/docker/docker/releases/tag/v1.6.0)** +**Deprecated in release: [v1.6.0](https://github.com/docker/docker/releases/tag/v1.6.0)** -**Removed In Release: [v17.09](https://github.com/docker/docker-ce/releases/tag/v17.09.0-ce)** +**Removed in release: [v17.09](https://github.com/docker/docker-ce/releases/tag/v17.09.0-ce)** The flag `--api-enable-cors` is deprecated since v1.6.0. Use the flag `--api-cors-header` instead. ### `--run` flag on `docker commit` -**Deprecated In Release: [v0.10.0](https://github.com/docker/docker/releases/tag/v0.10.0)** +**Deprecated in release: [v0.10.0](https://github.com/docker/docker/releases/tag/v0.10.0)** -**Removed In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** +**Removed in release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** The flag `--run` of the `docker commit` command (and its short version `-run`) were deprecated in favor of the `--changes` flag that allows to pass `Dockerfile` commands. ### Three arguments form in `docker import` -**Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)** +**Deprecated in release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)** -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** +**Removed in release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** The `docker import` command format `file|URL|- [REPOSITORY [TAG]]` is deprecated since November 2013. It's no longer supported. diff --git a/_vendor/github.com/docker/cli/docs/extend/plugin_api.md b/_vendor/github.com/docker/cli/docs/extend/plugin_api.md index a0b0a736755..0cf2d9e1d6b 100644 --- a/_vendor/github.com/docker/cli/docs/extend/plugin_api.md +++ b/_vendor/github.com/docker/cli/docs/extend/plugin_api.md @@ -21,7 +21,7 @@ which registers itself by placing a file on the daemon host in one of the plugin directories described in [Plugin discovery](#plugin-discovery). Plugins have human-readable names, which are short, lowercase strings. For -example, `flocker` or `weave`. +example, `myplugin`. Plugins can run inside or outside containers. Currently running them outside containers is recommended. @@ -45,12 +45,12 @@ spec files can be located either under `/etc/docker/plugins` or `/usr/lib/docker The name of the file (excluding the extension) determines the plugin name. -For example, the `flocker` plugin might create a Unix socket at -`/run/docker/plugins/flocker.sock`. +For example, a plugin named `myplugin` might create a Unix socket at +`/run/docker/plugins/myplugin.sock`. You can define each plugin into a separated subdirectory if you want to isolate definitions from each other. 
-For example, you can create the `flocker` socket under `/run/docker/plugins/flocker/flocker.sock` and only -mount `/run/docker/plugins/flocker` inside the `flocker` container. +For example, you can create the `myplugin` socket under `/run/docker/plugins/myplugin/myplugin.sock` and only +mount `/run/docker/plugins/myplugin` inside the `myplugin` container. Docker always searches for Unix sockets in `/run/docker/plugins` first. It checks for spec or json files under `/etc/docker/plugins` and `/usr/lib/docker/plugins` if the socket doesn't exist. The directory scan stops as diff --git a/_vendor/github.com/docker/cli/docs/extend/plugins_network.md b/_vendor/github.com/docker/cli/docs/extend/plugins_network.md index 8f94546b01b..0ff0a64b1b6 100644 --- a/_vendor/github.com/docker/cli/docs/extend/plugins_network.md +++ b/_vendor/github.com/docker/cli/docs/extend/plugins_network.md @@ -61,11 +61,4 @@ plugin protocol The network driver protocol, in addition to the plugin activation call, is documented as part of libnetwork: -[https://github.com/moby/moby/blob/master/libnetwork/docs/remote.md](https://github.com/moby/moby/blob/master/libnetwork/docs/remote.md). - -## Related Information - -To interact with the Docker maintainers and other interested users, see the IRC channel `#docker-network`. - -- [Docker networks feature overview](https://docs.docker.com/engine/userguide/networking/) -- The [LibNetwork](https://github.com/docker/libnetwork) project +[https://github.com/moby/moby/blob/master/daemon/libnetwork/docs/remote.md](https://github.com/moby/moby/blob/master/daemon/libnetwork/docs/remote.md). diff --git a/_vendor/github.com/docker/cli/docs/extend/plugins_services.md b/_vendor/github.com/docker/cli/docs/extend/plugins_services.md index f0c2dc503dd..fb914047b80 100644 --- a/_vendor/github.com/docker/cli/docs/extend/plugins_services.md +++ b/_vendor/github.com/docker/cli/docs/extend/plugins_services.md @@ -38,7 +38,7 @@ node1 is the manager and node2 is the worker. ```console $ docker swarm join \ - --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \ + --token SWMTKN-1-aabbccdd00112233aabbccdd00112233aabbccdd00112233aa-aabbccdd00112233... \ 192.168.99.100:2377 ``` diff --git a/_vendor/github.com/docker/cli/docs/extend/plugins_volume.md b/_vendor/github.com/docker/cli/docs/extend/plugins_volume.md index 8da7deb742c..688350f6231 100644 --- a/_vendor/github.com/docker/cli/docs/extend/plugins_volume.md +++ b/_vendor/github.com/docker/cli/docs/extend/plugins_volume.md @@ -45,7 +45,7 @@ accepts a volume name and path on the host, and the `--volume-driver` flag accepts a driver type. ```console -$ docker volume create --driver=flocker volumename +$ docker volume create --driver=myplugin volumename $ docker container run -it --volume volumename:/data busybox sh ``` @@ -61,11 +61,18 @@ separated by a colon (`:`) character. - The `Mountpoint` is the path on the host (v1) or in the plugin (v2) where the volume has been made available. -### `volumedriver` +### `--volume-driver` -Specifying a `volumedriver` in conjunction with a `volumename` allows you to -use plugins such as [Flocker](https://github.com/ScatterHQ/flocker) to manage -volumes external to a single host, such as those on EBS. +Specifying the `--volume-driver` flag together with a volume name (using +`--volume`) allows you to use plugins to manage volumes for the container. + +The `--volume-driver` flag is used as a default for all volumes created for +the container, including anonymous volumes. 
Use the [`--mount`] flag with +the [`volume-driver`] option to specify the driver to use for each volume +individually. + +[`--mount`]: https://docs.docker.com/reference/cli/docker/container/run/#mount +[`volume-driver`]: https://docs.docker.com/engine/storage/volumes/#start-a-container-which-creates-a-volume-using-a-volume-driver ## Create a VolumeDriver diff --git a/_vendor/github.com/docker/cli/docs/reference/dockerd.md b/_vendor/github.com/docker/cli/docs/reference/dockerd.md index b55b66c30b1..32ed51f9d04 100644 --- a/_vendor/github.com/docker/cli/docs/reference/dockerd.md +++ b/_vendor/github.com/docker/cli/docs/reference/dockerd.md @@ -24,10 +24,12 @@ A self-sufficient runtime for containers. Options: --add-runtime runtime Register an additional OCI compatible runtime (default []) + --allow-direct-routing Allow remote access to published ports on container IP addresses --authorization-plugin list Authorization plugins to load --bip string IPv4 address for the default bridge --bip6 string IPv6 address for the default bridge -b, --bridge string Attach containers to a network bridge + --bridge-accept-fwmark string In bridge networks, accept packets with this firewall mark/mask --cdi-spec-dir list CDI specification directories to use --cgroup-parent string Set parent cgroup for all containers --config-file string Daemon configuration file (default "/etc/docker/daemon.json") @@ -57,6 +59,7 @@ Options: --exec-root string Root directory for execution state files (default "/var/run/docker") --experimental Enable experimental features --feature map Enable feature in the daemon + --firewall-backend string Firewall backend to use, iptables or nftables --fixed-cidr string IPv4 subnet for the default bridge network --fixed-cidr-v6 string IPv6 subnet for the default bridge network -G, --group string Group for the unix socket (default "docker") @@ -607,7 +610,7 @@ $ sudo dockerd --add-runtime <runtime>=<path> Defining runtime arguments via the command line is not supported. -For an example configuration for a runc drop-in replacment, see +For an example configuration for a runc drop-in replacement, see [Alternative container runtimes > youki](https://docs.docker.com/engine/daemon/alternative-runtimes/#youki) ##### Configure the default container runtime @@ -839,42 +842,49 @@ $ docker run -it --add-host host.docker.internal:host-gateway \ PING host.docker.internal (2001:db8::1111): 56 data bytes ``` -### Enable CDI devices - -> [!NOTE] -> This is experimental feature and as such doesn't represent a stable API. -> -> This feature isn't enabled by default. To this feature, set `features.cdi` to -> `true` in the `daemon.json` configuration file. +### Configure CDI devices Container Device Interface (CDI) is a [standardized](https://github.com/cncf-tags/container-device-interface/blob/main/SPEC.md) mechanism for container runtimes to create containers which are able to interact with third party devices. +CDI is currently only supported for Linux containers and is enabled by default +since Docker Engine 28.3.0. + The Docker daemon supports running containers with CDI devices if the requested device specifications are available on the filesystem of the daemon. 
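As a rough sketch of what requesting a CDI device can look like once a matching specification is present on the daemon host; the fully qualified device name `vendor.example.com/device=mydev` is a placeholder for whatever your CDI spec defines:

```console
$ docker run --rm --device vendor.example.com/device=mydev alpine true
```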
-The default specification directors are: +The default specification directories are: - `/etc/cdi/` for static CDI Specs - `/var/run/cdi` for generated CDI Specs -Alternatively, you can set custom locations for CDI specifications using the +#### Set custom locations + +To set custom locations for CDI specifications, use the `cdi-spec-dirs` option in the `daemon.json` configuration file, or the -`--cdi-spec-dir` flag for the `dockerd` CLI. +`--cdi-spec-dir` flag for the `dockerd` CLI: ```json { - "features": { - "cdi": true - }, "cdi-spec-dirs": ["/etc/cdi/", "/var/run/cdi"] } ``` -When CDI is enabled for a daemon, you can view the configured CDI specification -directories using the `docker info` command. +You can view the configured CDI specification directories using the `docker info` command. + +#### Disable CDI devices + +The feature in enabled by default. To disable it, use the `cdi` options in the `daemon.json` file: + +```json +"features": { + "cdi": false +}, +``` + +To check the status of the CDI devices, run `docker info`. #### Daemon logging format {#log-format} @@ -1057,18 +1067,20 @@ The following is a full example of the allowed configuration options on Linux: ```json { + "allow-direct-routing": false, "authorization-plugins": [], "bip": "", "bip6": "", "bridge": "", + "bridge-accept-fwmark": "", "builder": { "gc": { "enabled": true, - "defaultKeepStorage": "10GB", + "defaultReservedSpace": "10GB", "policy": [ - { "keepStorage": "10GB", "filter": ["unused-for=2200h"] }, - { "keepStorage": "50GB", "filter": ["unused-for=3300h"] }, - { "keepStorage": "100GB", "all": true } + { "maxUsedSpace": "512MB", "keepDuration": "48h", "filter": [ "type=source.local" ] }, + { "reservedSpace": "10GB", "maxUsedSpace": "100GB", "keepDuration": "1440h" }, + { "reservedSpace": "50GB", "minFreeSpace": "20GB", "maxUsedSpace": "200GB", "all": true } ] } }, @@ -1111,6 +1123,7 @@ The following is a full example of the allowed configuration options on Linux: "cdi": true, "containerd-snapshotter": true }, + "firewall-backend": "", "fixed-cidr": "", "fixed-cidr-v6": "", "group": "", @@ -1300,7 +1313,7 @@ The list of currently supported options that can be reconfigured is this: | ---------------------------------- | ----------------------------------------------------------------------------------------------------------- | | `debug` | Toggles debug mode of the daemon. | | `labels` | Replaces the daemon labels with a new set of labels. | -| `live-restore` | Toggles [live restore](https://docs.docker.com/engine/containers/live-restore/). | +| `live-restore` | Toggles [live restore](https://docs.docker.com/engine/daemon/live-restore/). | | `max-concurrent-downloads` | Configures the max concurrent downloads for each pull. | | `max-concurrent-uploads` | Configures the max concurrent uploads for each push. | | `max-download-attempts` | Configures the max download attempts for each pull. | diff --git a/_vendor/github.com/docker/cli/docs/reference/run.md b/_vendor/github.com/docker/cli/docs/reference/run.md index db06ad71f66..7da6e58008d 100644 --- a/_vendor/github.com/docker/cli/docs/reference/run.md +++ b/_vendor/github.com/docker/cli/docs/reference/run.md @@ -248,10 +248,18 @@ $ docker run -it --mount type=bind,source=[PATH],target=[PATH] busybox ``` In this case, the `--mount` flag takes three parameters. A type (`bind`), and -two paths. The `source` path is a the location on the host that you want to +two paths. The `source` path is the location on the host that you want to bind mount into the container. 
The `target` path is the mount destination inside the container. +By default, bind mounts require the source path to exist on the daemon host. If the +source path doesn't exist, an error is returned. To create the source path on +the daemon host if it doesn't exist, use the `bind-create-src` option: + +```console +$ docker run -it --mount type=bind,source=[PATH],target=[PATH],bind-create-src busybox +``` + Bind mounts are read-write by default, meaning that you can both read and write files to and from the mounted location from the container. Changes that you make, such as adding or editing files, are reflected on the host filesystem: @@ -419,7 +427,7 @@ $ docker run -it -m 300M ubuntu:24.04 /bin/bash We set memory limit only, this means the processes in the container can use 300M memory and 300M swap memory, by default, the total virtual memory size -(--memory-swap) will be set as double of memory, in this case, memory + swap +(`--memory-swap`) will be set as double of memory, in this case, memory + swap would be 2*300M, so processes can use 300M swap memory as well. ```console @@ -1087,7 +1095,7 @@ Additionally, you can set any environment variable in the container by using one or more `-e` flags. You can even override the variables mentioned above, or variables defined using a Dockerfile `ENV` instruction when building the image. -If the you name an environment variable without specifying a value, the current +If you name an environment variable without specifying a value, the current value of the named variable on the host is propagated into the container's environment: @@ -1214,7 +1222,7 @@ starting a container, you can override the `USER` instruction by passing the -u="", --user="": Sets the username or UID used and optionally the groupname or GID for the specified command. ``` -The followings examples are all valid: +The following examples are all valid: ```text --user=[ user | user:group | uid | uid:gid | user:gid | uid:group ] diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_events.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_events.md deleted file mode 100644 index b71f4c993d5..00000000000 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_events.md +++ /dev/null @@ -1,54 +0,0 @@ -# docker compose events - -<!---MARKER_GEN_START--> -Stream container events for every container in the project. - -With the `--json` flag, a json object is printed one per line with the format: - -```json -{ - "time": "2015-11-20T18:01:03.615550", - "type": "container", - "action": "create", - "id": "213cf7...5fc39a", - "service": "web", - "attributes": { - "name": "application_web_1", - "image": "alpine:edge" - } -} -``` - -The events that can be received using this can be seen [here](/reference/cli/docker/system/events/#object-types). - -### Options - -| Name | Type | Default | Description | -|:------------|:-------|:--------|:------------------------------------------| -| `--dry-run` | `bool` | | Execute command in dry run mode | -| `--json` | `bool` | | Output events as a stream of json objects | - - -<!---MARKER_GEN_END--> - -## Description - -Stream container events for every container in the project. 
- -With the `--json` flag, a json object is printed one per line with the format: - -```json -{ - "time": "2015-11-20T18:01:03.615550", - "type": "container", - "action": "create", - "id": "213cf7...5fc39a", - "service": "web", - "attributes": { - "name": "application_web_1", - "image": "alpine:edge" - } -} -``` - -The events that can be received using this can be seen [here](https://docs.docker.com/reference/cli/docker/system/events/#object-types). diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_exec.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_exec.md deleted file mode 100644 index 8b54def472e..00000000000 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_exec.md +++ /dev/null @@ -1,30 +0,0 @@ -# docker compose exec - -<!---MARKER_GEN_START--> -This is the equivalent of `docker exec` targeting a Compose service. - -With this subcommand, you can run arbitrary commands in your services. Commands allocate a TTY by default, so -you can use a command such as `docker compose exec web sh` to get an interactive prompt. - -### Options - -| Name | Type | Default | Description | -|:------------------|:--------------|:--------|:---------------------------------------------------------------------------------| -| `-d`, `--detach` | `bool` | | Detached mode: Run command in the background | -| `--dry-run` | `bool` | | Execute command in dry run mode | -| `-e`, `--env` | `stringArray` | | Set environment variables | -| `--index` | `int` | `0` | Index of the container if service has multiple replicas | -| `-T`, `--no-TTY` | `bool` | `true` | Disable pseudo-TTY allocation. By default `docker compose exec` allocates a TTY. | -| `--privileged` | `bool` | | Give extended privileges to the process | -| `-u`, `--user` | `string` | | Run the command as this user | -| `-w`, `--workdir` | `string` | | Path to workdir directory for this command | - - -<!---MARKER_GEN_END--> - -## Description - -This is the equivalent of `docker exec` targeting a Compose service. - -With this subcommand, you can run arbitrary commands in your services. Commands allocate a TTY by default, so -you can use a command such as `docker compose exec web sh` to get an interactive prompt. diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_start.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_start.md deleted file mode 100644 index 08db7ef2135..00000000000 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_start.md +++ /dev/null @@ -1,17 +0,0 @@ -# docker compose start - -<!---MARKER_GEN_START--> -Starts existing containers for a service - -### Options - -| Name | Type | Default | Description | -|:------------|:-------|:--------|:--------------------------------| -| `--dry-run` | `bool` | | Execute command in dry run mode | - - -<!---MARKER_GEN_END--> - -## Description - -Starts existing containers for a service diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_events.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_events.yaml deleted file mode 100644 index fe6d4216ce1..00000000000 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_events.yaml +++ /dev/null @@ -1,54 +0,0 @@ -command: docker compose events -short: Receive real time events from containers -long: |- - Stream container events for every container in the project. 
- - With the `--json` flag, a json object is printed one per line with the format: - - ```json - { - "time": "2015-11-20T18:01:03.615550", - "type": "container", - "action": "create", - "id": "213cf7...5fc39a", - "service": "web", - "attributes": { - "name": "application_web_1", - "image": "alpine:edge" - } - } - ``` - - The events that can be received using this can be seen [here](/reference/cli/docker/system/events/#object-types). -usage: docker compose events [OPTIONS] [SERVICE...] -pname: docker compose -plink: docker_compose.yaml -options: - - option: json - value_type: bool - default_value: "false" - description: Output events as a stream of json objects - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false -inherited_options: - - option: dry-run - value_type: bool - default_value: "false" - description: Execute command in dry run mode - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false -deprecated: false -hidden: false -experimental: false -experimentalcli: false -kubernetes: false -swarm: false - diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_start.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_start.yaml deleted file mode 100644 index 902b688d3e7..00000000000 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_start.yaml +++ /dev/null @@ -1,24 +0,0 @@ -command: docker compose start -short: Start services -long: Starts existing containers for a service -usage: docker compose start [SERVICE...] -pname: docker compose -plink: docker_compose.yaml -inherited_options: - - option: dry-run - value_type: bool - default_value: "false" - description: Execute command in dry run mode - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false -deprecated: false -hidden: false -experimental: false -experimentalcli: false -kubernetes: false -swarm: false - diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose.md similarity index 88% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose.md index d1a1c2a4627..d80bb86ec62 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose.md +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose.md @@ -1,3 +1,4 @@ + # docker compose ```text @@ -12,6 +13,7 @@ Define and run multi-container applications with Docker | Name | Description | |:--------------------------------|:----------------------------------------------------------------------------------------| | [`attach`](compose_attach.md) | Attach local standard input, output, and error streams to a service's running container | +| [`bridge`](compose_bridge.md) | Convert compose files into another model | | [`build`](compose_build.md) | Build or rebuild services | | [`commit`](compose_commit.md) | Create a new image from a service container's changes | | [`config`](compose_config.md) | Parse, resolve and render compose file in canonical format | @@ -42,6 +44,7 @@ Define and run multi-container applications with Docker | [`unpause`](compose_unpause.md) | Unpause services | | [`up`](compose_up.md) | Create and start containers | | [`version`](compose_version.md) | Show the Docker Compose version information | +| [`volumes`](compose_volumes.md) | 
List volumes | | [`wait`](compose_wait.md) | Block until containers of all (or specified) services stop. | | [`watch`](compose_watch.md) | Watch build context for service and rebuild/refresh containers when files are updated | @@ -58,7 +61,7 @@ Define and run multi-container applications with Docker | `-f`, `--file` | `stringArray` | | Compose configuration files | | `--parallel` | `int` | `-1` | Control max parallelism, -1 for unlimited | | `--profile` | `stringArray` | | Specify a profile to enable | -| `--progress` | `string` | `auto` | Set type of progress output (auto, tty, plain, json, quiet) | +| `--progress` | `string` | | Set type of progress output (auto, tty, plain, json, quiet) | | `--project-directory` | `string` | | Specify an alternate working directory<br>(default: the path of the, first specified, Compose file) | | `-p`, `--project-name` | `string` | | Project name | @@ -124,6 +127,57 @@ get the postgres image for the db service from anywhere by using the `-f` flag a $ docker compose -f ~/sandbox/rails/compose.yaml pull db ``` +#### Using an OCI published artifact +You can use the `-f` flag with the `oci://` prefix to reference a Compose file that has been published to an OCI registry. +This allows you to distribute and version your Compose configurations as OCI artifacts. + +To use a Compose file from an OCI registry: + +```console +$ docker compose -f oci://registry.example.com/my-compose-project:latest up +``` + +You can also combine OCI artifacts with local files: + +```console +$ docker compose -f oci://registry.example.com/my-compose-project:v1.0 -f compose.override.yaml up +``` + +The OCI artifact must contain a valid Compose file. You can publish Compose files to an OCI registry using the +`docker compose publish` command. + +#### Using a git repository +You can use the `-f` flag to reference a Compose file from a git repository. Compose supports various git URL formats: + +Using HTTPS: +```console +$ docker compose -f https://github.com/user/repo.git up +``` + +Using SSH: +```console +$ docker compose -f git@github.com:user/repo.git up +``` + +You can specify a specific branch, tag, or commit: +```console +$ docker compose -f https://github.com/user/repo.git@main up +$ docker compose -f https://github.com/user/repo.git@v1.0.0 up +$ docker compose -f https://github.com/user/repo.git@abc123 up +``` + +You can also specify a subdirectory within the repository: +```console +$ docker compose -f https://github.com/user/repo.git#main:path/to/compose.yaml up +``` + +When using git resources, Compose will clone the repository and use the specified Compose file. You can combine +git resources with local files: + +```console +$ docker compose -f https://github.com/user/repo.git -f compose.override.yaml up +``` + ### Use `-p` to specify a project name Each configuration has a project name. 
Compose sets the project name using diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_alpha.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_alpha.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_alpha.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_alpha.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_dry-run.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_alpha_dry-run.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_dry-run.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_alpha_dry-run.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_generate.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_alpha_generate.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_generate.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_alpha_generate.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_publish.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_alpha_publish.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_publish.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_alpha_publish.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_scale.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_alpha_scale.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_scale.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_alpha_scale.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_viz.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_alpha_viz.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_viz.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_alpha_viz.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_watch.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_alpha_watch.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_watch.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_alpha_watch.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_attach.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_attach.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_attach.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_attach.md diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge.md new file mode 100644 index 00000000000..78d3da4934c --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge.md @@ -0,0 +1,22 @@ +# docker compose bridge + +<!---MARKER_GEN_START--> +Convert compose files into another model + +### Subcommands + +| Name | Description | +|:-------------------------------------------------------|:-----------------------------------------------------------------------------| +| [`convert`](compose_bridge_convert.md) | Convert compose files to 
Kubernetes manifests, Helm charts, or another model | +| [`transformations`](compose_bridge_transformations.md) | Manage transformation images | + + +### Options + +| Name | Type | Default | Description | +|:------------|:-------|:--------|:--------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge_convert.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge_convert.md new file mode 100644 index 00000000000..d4b91ba172d --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge_convert.md @@ -0,0 +1,17 @@ +# docker compose bridge convert + +<!---MARKER_GEN_START--> +Convert compose files to Kubernetes manifests, Helm charts, or another model + +### Options + +| Name | Type | Default | Description | +|:-------------------------|:--------------|:--------|:-------------------------------------------------------------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `-o`, `--output` | `string` | `out` | The output directory for the Kubernetes resources | +| `--templates` | `string` | | Directory containing transformation templates | +| `-t`, `--transformation` | `stringArray` | | Transformation to apply to compose model (default: docker/compose-bridge-kubernetes) | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge_transformations.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge_transformations.md new file mode 100644 index 00000000000..1e1c7be392b --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge_transformations.md @@ -0,0 +1,22 @@ +# docker compose bridge transformations + +<!---MARKER_GEN_START--> +Manage transformation images + +### Subcommands + +| Name | Description | +|:-----------------------------------------------------|:-------------------------------| +| [`create`](compose_bridge_transformations_create.md) | Create a new transformation | +| [`list`](compose_bridge_transformations_list.md) | List available transformations | + + +### Options + +| Name | Type | Default | Description | +|:------------|:-------|:--------|:--------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge_transformations_create.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge_transformations_create.md new file mode 100644 index 00000000000..187e8d9eca3 --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge_transformations_create.md @@ -0,0 +1,15 @@ +# docker compose bridge transformations create + +<!---MARKER_GEN_START--> +Create a new transformation + +### Options + +| Name | Type | Default | Description | +|:---------------|:---------|:--------|:----------------------------------------------------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `-f`, `--from` | `string` | | Existing transformation to copy (default: docker/compose-bridge-kubernetes) | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge_transformations_list.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge_transformations_list.md new file mode 100644 index 
00000000000..ce0a5e6911a --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_bridge_transformations_list.md @@ -0,0 +1,20 @@ +# docker compose bridge transformations list + +<!---MARKER_GEN_START--> +List available transformations + +### Aliases + +`docker compose bridge transformations list`, `docker compose bridge transformations ls` + +### Options + +| Name | Type | Default | Description | +|:----------------|:---------|:--------|:-------------------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `--format` | `string` | `table` | Format the output. Values: [table \| json] | +| `-q`, `--quiet` | `bool` | | Only display transformer names | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_build.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_build.md similarity index 87% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_build.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_build.md index 5589a46934c..a715974dfa5 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_build.md +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_build.md @@ -22,9 +22,11 @@ run `docker compose build` to rebuild it. | `-m`, `--memory` | `bytes` | `0` | Set memory limit for the build container. Not supported by BuildKit. | | `--no-cache` | `bool` | | Do not use cache when building the image | | `--print` | `bool` | | Print equivalent bake file | +| `--provenance` | `string` | | Add a provenance attestation | | `--pull` | `bool` | | Always attempt to pull a newer version of the image | | `--push` | `bool` | | Push service images | -| `-q`, `--quiet` | `bool` | | Don't print anything to STDOUT | +| `-q`, `--quiet` | `bool` | | Suppress the build output | +| `--sbom` | `string` | | Add a SBOM attestation | | `--ssh` | `string` | | Set SSH authentications used when building service images. (use 'default' for using your default SSH Agent) | | `--with-dependencies` | `bool` | | Also build dependencies (transitively) | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_commit.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_commit.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_commit.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_commit.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_config.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_config.md similarity index 88% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_config.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_config.md index 78c1835a527..e2e773feae5 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_config.md +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_config.md @@ -5,10 +5,6 @@ It merges the Compose files set by `-f` flags, resolves variables in the Compose file, and expands short-notation into the canonical format. -### Aliases - -`docker compose config`, `docker compose convert` - ### Options | Name | Type | Default | Description | @@ -18,6 +14,9 @@ the canonical format. | `--format` | `string` | | Format the output. Values: [yaml \| json] | | `--hash` | `string` | | Print the service config hash, one per line. | | `--images` | `bool` | | Print the image names, one per line. 
| +| `--lock-image-digests` | `bool` | | Produces an override file with image digests | +| `--models` | `bool` | | Print the model names, one per line. | +| `--networks` | `bool` | | Print the network names, one per line. | | `--no-consistency` | `bool` | | Don't check model consistency - warning: may produce invalid Compose output | | `--no-env-resolution` | `bool` | | Don't resolve service env files | | `--no-interpolate` | `bool` | | Don't interpolate environment variables | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_cp.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_cp.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_cp.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_cp.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_create.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_create.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_create.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_create.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_down.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_down.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_down.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_down.md diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/compose_events.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_events.md new file mode 100644 index 00000000000..066b5cf3831 --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_events.md @@ -0,0 +1,56 @@ +# docker compose events + +<!---MARKER_GEN_START--> +Stream container events for every container in the project. + +With the `--json` flag, a json object is printed one per line with the format: + +```json +{ + "time": "2015-11-20T18:01:03.615550", + "type": "container", + "action": "create", + "id": "213cf7...5fc39a", + "service": "web", + "attributes": { + "name": "application_web_1", + "image": "alpine:edge" + } +} +``` + +The events that can be received using this can be seen [here](/reference/cli/docker/system/events/#object-types). + +### Options + +| Name | Type | Default | Description | +|:------------|:---------|:--------|:------------------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `--json` | `bool` | | Output events as a stream of json objects | +| `--since` | `string` | | Show all events created since timestamp | +| `--until` | `string` | | Stream events until this timestamp | + + +<!---MARKER_GEN_END--> + +## Description + +Stream container events for every container in the project. + +With the `--json` flag, a json object is printed one per line with the format: + +```json +{ + "time": "2015-11-20T18:01:03.615550", + "type": "container", + "action": "create", + "id": "213cf7...5fc39a", + "service": "web", + "attributes": { + "name": "application_web_1", + "image": "alpine:edge" + } +} +``` + +The events that can be received using this can be seen [here](https://docs.docker.com/reference/cli/docker/system/events/#object-types). 
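+ +For example, to stream events for the `web` service as a feed of JSON objects, one per line (an illustrative invocation; the service name is taken from the sample output above, and omitting it streams events for all services in the project): + +```console +$ docker compose events --json web +```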
diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/compose_exec.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_exec.md new file mode 100644 index 00000000000..312219e7316 --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_exec.md @@ -0,0 +1,42 @@ +# docker compose exec + +<!---MARKER_GEN_START--> +This is the equivalent of `docker exec` targeting a Compose service. + +With this subcommand, you can run arbitrary commands in your services. Commands allocate a TTY by default, so +you can use a command such as `docker compose exec web sh` to get an interactive prompt. + +By default, Compose enters the container in interactive mode and allocates a TTY, while the equivalent `docker exec` +command requires passing the `--interactive --tty` flags to get the same behavior. Compose also supports those two flags +to offer a smooth migration between commands, even though they are no-ops by default. Still, `--interactive` can be used to +force-disable interactive mode (`--interactive=false`), typically when the `docker compose exec` command is used inside +a script. + +### Options + +| Name | Type | Default | Description | +|:------------------|:--------------|:--------|:---------------------------------------------------------------------------------| +| `-d`, `--detach` | `bool` | | Detached mode: Run command in the background | +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `-e`, `--env` | `stringArray` | | Set environment variables | +| `--index` | `int` | `0` | Index of the container if service has multiple replicas | +| `-T`, `--no-tty` | `bool` | `true` | Disable pseudo-TTY allocation. By default 'docker compose exec' allocates a TTY. | +| `--privileged` | `bool` | | Give extended privileges to the process | +| `-u`, `--user` | `string` | | Run the command as this user | +| `-w`, `--workdir` | `string` | | Path to workdir directory for this command | + + +<!---MARKER_GEN_END--> + +## Description + +This is the equivalent of `docker exec` targeting a Compose service. + +With this subcommand, you can run arbitrary commands in your services. Commands allocate a TTY by default, so +you can use a command such as `docker compose exec web sh` to get an interactive prompt. + +By default, Compose enters the container in interactive mode and allocates a TTY, while the equivalent `docker exec` +command requires passing the `--interactive --tty` flags to get the same behavior. Compose also supports those two flags +to offer a smooth migration between commands, even though they are no-ops by default. Still, `--interactive` can be used to +force-disable interactive mode (`--interactive=false`), typically when the `docker compose exec` command is used inside +a script.
\ No newline at end of file diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_export.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_export.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_export.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_export.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_images.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_images.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_images.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_images.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_kill.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_kill.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_kill.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_kill.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_logs.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_logs.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_logs.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_logs.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_ls.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_ls.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_ls.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_ls.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_pause.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_pause.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_pause.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_pause.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_port.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_port.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_port.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_port.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_ps.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_ps.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_ps.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_ps.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_publish.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_publish.md similarity index 88% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_publish.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_publish.md index 8e5d181336b..9a82fc260a7 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_publish.md +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_publish.md @@ -7,6 +7,7 @@ Publish compose application | Name | Type | Default | Description | |:--------------------------|:---------|:--------|:-------------------------------------------------------------------------------| +| `--app` | `bool` | | Published compose application (includes referenced images) | | `--dry-run` | `bool` | | Execute 
command in dry run mode | | `--oci-version` | `string` | | OCI image/artifact specification version (automatically determined by default) | | `--resolve-image-digests` | `bool` | | Pin image tags to digests | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_pull.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_pull.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_pull.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_pull.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_push.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_push.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_push.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_push.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_restart.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_restart.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_restart.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_restart.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_rm.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_rm.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_rm.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_rm.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_run.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_run.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_run.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_run.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_scale.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_scale.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_scale.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_scale.md diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/compose_start.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_start.md new file mode 100644 index 00000000000..06229e5940e --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_start.md @@ -0,0 +1,19 @@ +# docker compose start + +<!---MARKER_GEN_START--> +Starts existing containers for a service + +### Options + +| Name | Type | Default | Description | +|:-----------------|:-------|:--------|:---------------------------------------------------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `--wait` | `bool` | | Wait for services to be running\|healthy. Implies detached mode. 
| +| `--wait-timeout` | `int` | `0` | Maximum duration in seconds to wait for the project to be running\|healthy | + + +<!---MARKER_GEN_END--> + +## Description + +Starts existing containers for a service diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_stats.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_stats.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_stats.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_stats.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_stop.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_stop.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_stop.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_stop.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_top.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_top.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_top.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_top.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_unpause.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_unpause.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_unpause.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_unpause.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_up.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_up.md similarity index 97% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_up.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_up.md index b831cb16d34..b7f17a0fac9 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_up.md +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_up.md @@ -44,6 +44,7 @@ If the process is interrupted using `SIGINT` (ctrl + C) or `SIGTERM`, the contai | `--no-recreate` | `bool` | | If containers already exist, don't recreate them. Incompatible with --force-recreate. 
| | `--no-start` | `bool` | | Don't start the services after creating them | | `--pull` | `string` | `policy` | Pull image before running ("always"\|"missing"\|"never") | +| `--quiet-build` | `bool` | | Suppress the build output | | `--quiet-pull` | `bool` | | Pull without printing progress information | | `--remove-orphans` | `bool` | | Remove containers for services not defined in the Compose file | | `-V`, `--renew-anon-volumes` | `bool` | | Recreate anonymous volumes instead of retrieving data from the previous containers | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_version.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_version.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_version.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_version.md diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/compose_volumes.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_volumes.md new file mode 100644 index 00000000000..6bad874f187 --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/compose_volumes.md @@ -0,0 +1,16 @@ +# docker compose volumes + +<!---MARKER_GEN_START--> +List volumes + +### Options + +| Name | Type | Default | Description | +|:----------------|:---------|:--------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `--format` | `string` | `table` | Format output using a custom template:<br>'table': Print output in table format with column headers (default)<br>'table TEMPLATE': Print output in table format using the given Go template<br>'json': Print in JSON format<br>'TEMPLATE': Print output using the given Go template.<br>Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates | +| `-q`, `--quiet` | `bool` | | Only display volume names | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_wait.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_wait.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_wait.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_wait.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_watch.md b/_vendor/github.com/docker/compose/v5/docs/reference/compose_watch.md similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/compose_watch.md rename to _vendor/github.com/docker/compose/v5/docs/reference/compose_watch.md diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose.yaml similarity index 87% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose.yaml index 58ec47802a5..c5fdb937510 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose.yaml +++ 
b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose.yaml @@ -6,6 +6,7 @@ pname: docker plink: docker.yaml cname: - docker compose attach + - docker compose bridge - docker compose build - docker compose commit - docker compose config @@ -36,10 +37,12 @@ cname: - docker compose unpause - docker compose up - docker compose version + - docker compose volumes - docker compose wait - docker compose watch clink: - docker_compose_attach.yaml + - docker_compose_bridge.yaml - docker_compose_build.yaml - docker_compose_commit.yaml - docker_compose_config.yaml @@ -70,6 +73,7 @@ clink: - docker_compose_unpause.yaml - docker_compose_up.yaml - docker_compose_version.yaml + - docker_compose_volumes.yaml - docker_compose_wait.yaml - docker_compose_watch.yaml options: @@ -135,6 +139,17 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: insecure-registry + value_type: stringArray + default_value: '[]' + description: | + Use insecure registry to pull Compose OCI artifacts. Doesn't apply to images + deprecated: false + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: no-ansi value_type: bool default_value: "false" @@ -167,7 +182,6 @@ options: swarm: false - option: progress value_type: string - default_value: auto description: Set type of progress output (auto, tty, plain, json, quiet) deprecated: false hidden: false @@ -287,6 +301,57 @@ examples: |- $ docker compose -f ~/sandbox/rails/compose.yaml pull db ``` + #### Using an OCI published artifact + You can use the `-f` flag with the `oci://` prefix to reference a Compose file that has been published to an OCI registry. + This allows you to distribute and version your Compose configurations as OCI artifacts. + + To use a Compose file from an OCI registry: + + ```console + $ docker compose -f oci://registry.example.com/my-compose-project:latest up + ``` + + You can also combine OCI artifacts with local files: + + ```console + $ docker compose -f oci://registry.example.com/my-compose-project:v1.0 -f compose.override.yaml up + ``` + + The OCI artifact must contain a valid Compose file. You can publish Compose files to an OCI registry using the + `docker compose publish` command. + + #### Using a git repository + You can use the `-f` flag to reference a Compose file from a git repository. Compose supports various git URL formats: + + Using HTTPS: + ```console + $ docker compose -f https://github.com/user/repo.git up + ``` + + Using SSH: + ```console + $ docker compose -f git@github.com:user/repo.git up + ``` + + You can specify a specific branch, tag, or commit: + ```console + $ docker compose -f https://github.com/user/repo.git@main up + $ docker compose -f https://github.com/user/repo.git@v1.0.0 up + $ docker compose -f https://github.com/user/repo.git@abc123 up + ``` + + You can also specify a subdirectory within the repository: + ```console + $ docker compose -f https://github.com/user/repo.git#main:path/to/compose.yaml up + ``` + + When using git resources, Compose will clone the repository and use the specified Compose file. You can combine + git resources with local files: + + ```console + $ docker compose -f https://github.com/user/repo.git -f compose.override.yaml up + ``` + ### Use `-p` to specify a project name Each configuration has a project name. 
Compose sets the project name using diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_dry-run.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_dry-run.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_dry-run.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_dry-run.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_generate.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_generate.yaml similarity index 99% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_generate.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_generate.yaml index 0932af080ec..f31429c2d72 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_generate.yaml +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_generate.yaml @@ -45,7 +45,7 @@ inherited_options: kubernetes: false swarm: false deprecated: false -hidden: false +hidden: true experimental: false experimentalcli: true kubernetes: false diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_publish.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_publish.yaml similarity index 75% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_publish.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_publish.yaml index 1566677472a..9059cbf4869 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_publish.yaml +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_publish.yaml @@ -5,6 +5,26 @@ usage: docker compose alpha publish [OPTIONS] REPOSITORY[:TAG] pname: docker compose alpha plink: docker_compose_alpha.yaml options: + - option: app + value_type: bool + default_value: "false" + description: Published compose application (includes referenced images) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: insecure-registry + value_type: bool + default_value: "false" + description: Use insecure registry + deprecated: false + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: oci-version value_type: string description: | @@ -58,7 +78,7 @@ inherited_options: kubernetes: false swarm: false deprecated: false -hidden: false +hidden: true experimental: false experimentalcli: true kubernetes: false diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_scale.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_scale.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_scale.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_scale.yaml diff --git 
a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_viz.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_viz.yaml similarity index 99% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_viz.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_viz.yaml index b179d648ef8..c07475caac8 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_viz.yaml +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_viz.yaml @@ -69,7 +69,7 @@ inherited_options: kubernetes: false swarm: false deprecated: false -hidden: false +hidden: true experimental: false experimentalcli: true kubernetes: false diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_watch.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_watch.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_watch.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_alpha_watch.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_attach.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_attach.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_attach.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_attach.yaml diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge.yaml new file mode 100644 index 00000000000..5ef9ebf5585 --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge.yaml @@ -0,0 +1,29 @@ +command: docker compose bridge +short: Convert compose files into another model +long: Convert compose files into another model +pname: docker compose +plink: docker_compose.yaml +cname: + - docker compose bridge convert + - docker compose bridge transformations +clink: + - docker_compose_bridge_convert.yaml + - docker_compose_bridge_transformations.yaml +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge_convert.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge_convert.yaml new file mode 100644 index 00000000000..f55f0b233c3 --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge_convert.yaml @@ -0,0 +1,59 @@ +command: docker compose bridge convert +short: | + Convert compose files to Kubernetes manifests, Helm charts, or another model +long: | + Convert compose files to Kubernetes manifests, Helm charts, or another model +usage: docker compose bridge convert +pname: docker compose bridge +plink: docker_compose_bridge.yaml +options: + - option: output + shorthand: o + value_type: string + default_value: out + description: The output directory for the Kubernetes resources + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - 
option: templates + value_type: string + description: Directory containing transformation templates + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: transformation + shorthand: t + value_type: stringArray + default_value: '[]' + description: | + Transformation to apply to compose model (default: docker/compose-bridge-kubernetes) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge_transformations.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge_transformations.yaml new file mode 100644 index 00000000000..2ab5661f0b2 --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge_transformations.yaml @@ -0,0 +1,29 @@ +command: docker compose bridge transformations +short: Manage transformation images +long: Manage transformation images +pname: docker compose bridge +plink: docker_compose_bridge.yaml +cname: + - docker compose bridge transformations create + - docker compose bridge transformations list +clink: + - docker_compose_bridge_transformations_create.yaml + - docker_compose_bridge_transformations_list.yaml +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge_transformations_create.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge_transformations_create.yaml new file mode 100644 index 00000000000..e8dd9e58a51 --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge_transformations_create.yaml @@ -0,0 +1,36 @@ +command: docker compose bridge transformations create +short: Create a new transformation +long: Create a new transformation +usage: docker compose bridge transformations create [OPTION] PATH +pname: docker compose bridge transformations +plink: docker_compose_bridge_transformations.yaml +options: + - option: from + shorthand: f + value_type: string + description: | + Existing transformation to copy (default: docker/compose-bridge-kubernetes) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge_transformations_list.yaml 
b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge_transformations_list.yaml new file mode 100644 index 00000000000..3afd3a84b8e --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_bridge_transformations_list.yaml @@ -0,0 +1,47 @@ +command: docker compose bridge transformations list +aliases: docker compose bridge transformations list, docker compose bridge transformations ls +short: List available transformations +long: List available transformations +usage: docker compose bridge transformations list +pname: docker compose bridge transformations +plink: docker_compose_bridge_transformations.yaml +options: + - option: format + value_type: string + default_value: table + description: 'Format the output. Values: [table | json]' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: quiet + shorthand: q + value_type: bool + default_value: "false" + description: Only display transformer names + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_build.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_build.yaml similarity index 91% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_build.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_build.yaml index 1197d5314c4..e645a40aac2 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_build.yaml +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_build.yaml @@ -118,7 +118,6 @@ options: swarm: false - option: progress value_type: string - default_value: auto description: Set type of ui output (auto, tty, plain, json, quiet) deprecated: false hidden: true @@ -126,6 +125,15 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: provenance + value_type: string + description: Add a provenance attestation + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: pull value_type: bool default_value: "false" @@ -150,7 +158,16 @@ options: shorthand: q value_type: bool default_value: "false" - description: Don't print anything to STDOUT + description: Suppress the build output + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: sbom + value_type: string + description: Add a SBOM attestation deprecated: false hidden: false experimental: false diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_commit.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_commit.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_commit.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_commit.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_config.yaml 
b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_config.yaml similarity index 86% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_config.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_config.yaml index 7ec479b2000..3efc922b219 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_config.yaml +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_config.yaml @@ -1,5 +1,4 @@ command: docker compose config -aliases: docker compose config, docker compose convert short: Parse, resolve and render compose file in canonical format long: |- `docker compose config` renders the actual data model to be applied on the Docker Engine. @@ -47,6 +46,36 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: lock-image-digests + value_type: bool + default_value: "false" + description: Produces an override file with image digests + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: models + value_type: bool + default_value: "false" + description: Print the model names, one per line. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: networks + value_type: bool + default_value: "false" + description: Print the network names, one per line. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: no-consistency value_type: bool default_value: "false" diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_convert.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_convert.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_convert.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_convert.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_cp.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_cp.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_cp.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_cp.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_create.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_create.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_create.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_create.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_down.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_down.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_down.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_down.yaml diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_events.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_events.yaml new file mode 100644 index 00000000000..7c4cb4297f9 --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_events.yaml @@ -0,0 +1,72 @@ +command: docker compose events +short: Receive real time events from containers +long: |- + Stream 
container events for every container in the project. + + With the `--json` flag, a json object is printed one per line with the format: + + ```json + { + "time": "2015-11-20T18:01:03.615550", + "type": "container", + "action": "create", + "id": "213cf7...5fc39a", + "service": "web", + "attributes": { + "name": "application_web_1", + "image": "alpine:edge" + } + } + ``` + + The events that can be received using this can be seen [here](/reference/cli/docker/system/events/#object-types). +usage: docker compose events [OPTIONS] [SERVICE...] +pname: docker compose +plink: docker_compose.yaml +options: + - option: json + value_type: bool + default_value: "false" + description: Output events as a stream of json objects + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: since + value_type: string + description: Show all events created since timestamp + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: until + value_type: string + description: Stream events until this timestamp + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_exec.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_exec.yaml similarity index 84% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_exec.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_exec.yaml index b2a1cf20685..66ecfddab8d 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_exec.yaml +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_exec.yaml @@ -5,6 +5,12 @@ long: |- With this subcommand, you can run arbitrary commands in your services. Commands allocate a TTY by default, so you can use a command such as `docker compose exec web sh` to get an interactive prompt. + + By default, Compose enters the container in interactive mode and allocates a TTY, while the equivalent `docker exec` + command requires passing the `--interactive --tty` flags to get the same behavior. Compose also supports those two flags + to offer a smooth migration between commands, even though they are no-ops by default. Still, `--interactive` can be used to + force-disable interactive mode (`--interactive=false`), typically when the `docker compose exec` command is used inside + a script. usage: docker compose exec [OPTIONS] SERVICE COMMAND [ARGS...] pname: docker compose plink: docker_compose.yaml @@ -52,12 +58,12 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: no-TTY + - option: no-tty shorthand: T value_type: bool default_value: "true" description: | - Disable pseudo-TTY allocation. By default `docker compose exec` allocates a TTY. + Disable pseudo-TTY allocation. By default 'docker compose exec' allocates a TTY.
deprecated: false hidden: false experimental: false diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_export.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_export.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_export.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_export.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_images.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_images.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_images.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_images.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_kill.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_kill.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_kill.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_kill.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_logs.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_logs.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_logs.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_logs.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_ls.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_ls.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_ls.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_ls.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_pause.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_pause.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_pause.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_pause.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_port.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_port.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_port.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_port.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_ps.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_ps.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_ps.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_ps.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_publish.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_publish.yaml similarity index 76% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_publish.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_publish.yaml index 44a7a46dd42..c3189d89c57 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_publish.yaml +++ 
b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_publish.yaml @@ -5,6 +5,26 @@ usage: docker compose publish [OPTIONS] REPOSITORY[:TAG] pname: docker compose plink: docker_compose.yaml options: + - option: app + value_type: bool + default_value: "false" + description: Published compose application (includes referenced images) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: insecure-registry + value_type: bool + default_value: "false" + description: Use insecure registry + deprecated: false + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: oci-version value_type: string description: | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_pull.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_pull.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_pull.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_pull.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_push.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_push.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_push.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_push.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_restart.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_restart.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_restart.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_restart.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_rm.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_rm.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_rm.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_rm.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_run.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_run.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_run.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_run.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_scale.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_scale.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_scale.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_scale.yaml diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_start.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_start.yaml new file mode 100644 index 00000000000..56f9bcdff7c --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_start.yaml @@ -0,0 +1,46 @@ +command: docker compose start +short: Start services +long: Starts existing containers for a service +usage: docker compose start [SERVICE...] 
+pname: docker compose +plink: docker_compose.yaml +options: + - option: wait + value_type: bool + default_value: "false" + description: Wait for services to be running|healthy. Implies detached mode. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: wait-timeout + value_type: int + default_value: "0" + description: | + Maximum duration in seconds to wait for the project to be running|healthy + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_stats.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_stats.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_stats.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_stats.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_stop.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_stop.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_stop.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_stop.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_top.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_top.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_top.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_top.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_unpause.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_unpause.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_unpause.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_unpause.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_up.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_up.yaml similarity index 97% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_up.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_up.yaml index 47e0c5259eb..8c78a8fa683 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_up.yaml +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_up.yaml @@ -211,6 +211,16 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: quiet-build + value_type: bool + default_value: "false" + description: Suppress the build output + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: quiet-pull value_type: bool default_value: "false" diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_version.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_version.yaml similarity 
index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_version.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_version.yaml diff --git a/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_volumes.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_volumes.yaml new file mode 100644 index 00000000000..20516db7f13 --- /dev/null +++ b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_volumes.yaml @@ -0,0 +1,52 @@ +command: docker compose volumes +short: List volumes +long: List volumes +usage: docker compose volumes [OPTIONS] [SERVICE...] +pname: docker compose +plink: docker_compose.yaml +options: + - option: format + value_type: string + default_value: table + description: |- + Format output using a custom template: + 'table': Print output in table format with column headers (default) + 'table TEMPLATE': Print output in table format using the given Go template + 'json': Print in JSON format + 'TEMPLATE': Print output using the given Go template. + Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: quiet + shorthand: q + value_type: bool + default_value: "false" + description: Only display volume names + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_wait.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_wait.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_wait.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_wait.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_watch.yaml b/_vendor/github.com/docker/compose/v5/docs/reference/docker_compose_watch.yaml similarity index 100% rename from _vendor/github.com/docker/compose/v2/docs/reference/docker_compose_watch.yaml rename to _vendor/github.com/docker/compose/v5/docs/reference/docker_compose_watch.yaml diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model.yaml new file mode 100644 index 00000000000..6d1588f6f05 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model.yaml @@ -0,0 +1,74 @@ +command: docker model +short: Docker Model Runner +long: |- + Use Docker Model Runner to run and interact with AI models directly from the command line. 
+ For more information, see the [documentation](/ai/model-runner/) +pname: docker +plink: docker.yaml +cname: + - docker model bench + - docker model context + - docker model df + - docker model gateway + - docker model inspect + - docker model install-runner + - docker model launch + - docker model list + - docker model logs + - docker model package + - docker model ps + - docker model pull + - docker model purge + - docker model push + - docker model reinstall-runner + - docker model requests + - docker model restart-runner + - docker model rm + - docker model run + - docker model search + - docker model show + - docker model skills + - docker model start-runner + - docker model status + - docker model stop-runner + - docker model tag + - docker model uninstall-runner + - docker model unload + - docker model version +clink: + - docker_model_bench.yaml + - docker_model_context.yaml + - docker_model_df.yaml + - docker_model_gateway.yaml + - docker_model_inspect.yaml + - docker_model_install-runner.yaml + - docker_model_launch.yaml + - docker_model_list.yaml + - docker_model_logs.yaml + - docker_model_package.yaml + - docker_model_ps.yaml + - docker_model_pull.yaml + - docker_model_purge.yaml + - docker_model_push.yaml + - docker_model_reinstall-runner.yaml + - docker_model_requests.yaml + - docker_model_restart-runner.yaml + - docker_model_rm.yaml + - docker_model_run.yaml + - docker_model_search.yaml + - docker_model_show.yaml + - docker_model_skills.yaml + - docker_model_start-runner.yaml + - docker_model_status.yaml + - docker_model_stop-runner.yaml + - docker_model_tag.yaml + - docker_model_uninstall-runner.yaml + - docker_model_unload.yaml + - docker_model_version.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_bench.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_bench.yaml new file mode 100644 index 00000000000..65071532f5f --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_bench.yaml @@ -0,0 +1,69 @@ +command: docker model bench +short: Benchmark a model's performance at different concurrency levels +long: |- + Benchmark a model's performance showing tokens per second at different concurrency levels. + + This command runs a series of benchmarks with 1, 2, 4, and 8 concurrent requests by default, + measuring the tokens per second (TPS) that the model can generate. +usage: docker model bench MODEL +pname: docker model +plink: docker_model.yaml +options: + - option: concurrency + value_type: intSlice + default_value: '[1,2,4,8]' + description: Concurrency levels to test + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: duration + value_type: duration + default_value: 30s + description: Duration to run each concurrency test + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: json + value_type: bool + default_value: "false" + description: Output results in JSON format + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: prompt + value_type: string + default_value: | + Write a comprehensive 100 word summary on whales and their impact on society. 
+ description: Prompt to use for benchmarking + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: timeout + value_type: duration + default_value: 5m0s + description: Timeout for each individual request + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_compose.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_compose.yaml new file mode 100644 index 00000000000..4ed9874c0f1 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_compose.yaml @@ -0,0 +1,28 @@ +command: docker model compose +pname: docker model +plink: docker_model.yaml +cname: + - docker model compose down + - docker model compose metadata + - docker model compose up +clink: + - docker_model_compose_down.yaml + - docker_model_compose_metadata.yaml + - docker_model_compose_up.yaml +options: + - option: project-name + value_type: string + description: compose project name + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: true +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_compose_down.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_compose_down.yaml new file mode 100644 index 00000000000..39ff0baf59e --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_compose_down.yaml @@ -0,0 +1,21 @@ +command: docker model compose down +usage: docker model compose down +pname: docker model compose +plink: docker_model_compose.yaml +inherited_options: + - option: project-name + value_type: string + description: compose project name + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: true +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_compose_metadata.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_compose_metadata.yaml new file mode 100644 index 00000000000..d618dcc588a --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_compose_metadata.yaml @@ -0,0 +1,23 @@ +command: docker model compose metadata +short: Metadata for Docker Compose +long: Metadata for Docker Compose +usage: docker model compose metadata +pname: docker model compose +plink: docker_model_compose.yaml +inherited_options: + - option: project-name + value_type: string + description: compose project name + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: true +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_compose_up.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_compose_up.yaml new file mode 100644 index 00000000000..17e91577948 --- /dev/null +++ 
b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_compose_up.yaml @@ -0,0 +1,90 @@ +command: docker model compose up +usage: docker model compose up +pname: docker model compose +plink: docker_model_compose.yaml +options: + - option: backend + value_type: string + default_value: llama.cpp + description: inference backend to use + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: context-size + value_type: int64 + default_value: "-1" + description: context size for the model + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: model + value_type: stringArray + default_value: '[]' + description: model to use + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: runtime-flags + value_type: string + description: raw runtime flags to pass to the inference engine + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: speculative-draft-model + value_type: string + description: draft model for speculative decoding + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: speculative-min-acceptance-rate + value_type: float64 + default_value: "0" + description: minimum acceptance rate for speculative decoding + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: speculative-num-tokens + value_type: int + default_value: "0" + description: number of tokens to predict speculatively + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: project-name + value_type: string + description: compose project name + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: true +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_configure.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_configure.yaml new file mode 100644 index 00000000000..77f914fdcf6 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_configure.yaml @@ -0,0 +1,105 @@ +command: docker model configure +aliases: docker model configure, docker model config +short: Manage model runtime configurations +long: Manage model runtime configurations +usage: docker model configure [--context-size=<n>] [--speculative-draft-model=<model>] [--hf_overrides=<json>] [--gpu-memory-utilization=<float>] [--mode=<mode>] [--think] [--keep-alive=<duration>] MODEL [-- <runtime-flags...>] +pname: docker model +plink: docker_model.yaml +cname: + - docker model configure show +clink: + - docker_model_configure_show.yaml +options: + - option: context-size + value_type: int32 + description: context size (in tokens) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: gpu-memory-utilization + value_type: float64 + description: | + fraction of GPU memory to use for the model executor (0.0-1.0) - vLLM only + deprecated: false + hidden: false + experimental: false 
+ experimentalcli: false + kubernetes: false + swarm: false + - option: hf_overrides + value_type: string + description: HuggingFace model config overrides (JSON) - vLLM only + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: keep-alive + value_type: string + description: | + duration to keep model loaded (e.g., '5m', '1h', '0' to unload immediately, '-1' to never unload) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: mode + value_type: string + description: | + backend operation mode (completion, embedding, reranking, image-generation) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: speculative-draft-model + value_type: string + description: draft model for speculative decoding + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: speculative-min-acceptance-rate + value_type: float64 + default_value: "0" + description: minimum acceptance rate for speculative decoding + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: speculative-num-tokens + value_type: int + default_value: "0" + description: number of tokens to predict speculatively + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: think + value_type: bool + description: enable reasoning mode for thinking models + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: true +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_configure_show.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_configure_show.yaml new file mode 100644 index 00000000000..588c5c1b3e8 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_configure_show.yaml @@ -0,0 +1,13 @@ +command: docker model configure show +short: Show model configurations +long: Show model configurations +usage: docker model configure show [MODEL] +pname: docker model configure +plink: docker_model_configure.yaml +deprecated: false +hidden: true +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context.yaml new file mode 100644 index 00000000000..05a532ba727 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context.yaml @@ -0,0 +1,24 @@ +command: docker model context +short: Manage Docker Model Runner contexts +long: Manage Docker Model Runner contexts +pname: docker model +plink: docker_model.yaml +cname: + - docker model context create + - docker model context inspect + - docker model context ls + - docker model context rm + - docker model context use +clink: + - docker_model_context_create.yaml + - docker_model_context_inspect.yaml + - docker_model_context_ls.yaml + - docker_model_context_rm.yaml + - docker_model_context_use.yaml +deprecated: false +hidden: false +experimental: false 
+experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_create.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_create.yaml new file mode 100644 index 00000000000..47e5bb49db1 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_create.yaml @@ -0,0 +1,61 @@ +command: docker model context create +short: Create a named Model Runner context +long: Create a named Model Runner context +usage: docker model context create NAME +pname: docker model context +plink: docker_model_context.yaml +options: + - option: description + value_type: string + description: Optional human-readable description for this context + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: host + value_type: string + description: Model Runner API base URL (e.g. http://192.168.1.100:12434) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls + value_type: bool + default_value: "false" + description: Enable TLS for connections to this context + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls-ca-cert + value_type: string + description: Path to a custom CA certificate PEM file for TLS verification + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls-skip-verify + value_type: bool + default_value: "false" + description: Skip TLS server certificate verification + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_inspect.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_inspect.yaml new file mode 100644 index 00000000000..82c897dd387 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_inspect.yaml @@ -0,0 +1,13 @@ +command: docker model context inspect +short: Display detailed information about one or more contexts +long: Display detailed information about one or more contexts +usage: docker model context inspect NAME [NAME...] 
+pname: docker model context +plink: docker_model_context.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_ls.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_ls.yaml new file mode 100644 index 00000000000..cf97594f1df --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_ls.yaml @@ -0,0 +1,14 @@ +command: docker model context ls +aliases: docker model context ls, docker model context list +short: List Model Runner contexts +long: List Model Runner contexts +usage: docker model context ls +pname: docker model context +plink: docker_model_context.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_rm.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_rm.yaml new file mode 100644 index 00000000000..2efa303bb1b --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_rm.yaml @@ -0,0 +1,14 @@ +command: docker model context rm +aliases: docker model context rm, docker model context remove +short: Remove one or more Model Runner contexts +long: Remove one or more Model Runner contexts +usage: docker model context rm NAME [NAME...] +pname: docker model context +plink: docker_model_context.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_use.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_use.yaml new file mode 100644 index 00000000000..720281626cf --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_context_use.yaml @@ -0,0 +1,15 @@ +command: docker model context use +short: Set the active Model Runner context +long: |- + Set the active Model Runner context. Pass "default" to revert to + automatic backend detection. 
+usage: docker model context use NAME +pname: docker model context +plink: docker_model_context.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_df.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_df.yaml new file mode 100644 index 00000000000..7f1ba55b875 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_df.yaml @@ -0,0 +1,13 @@ +command: docker model df +short: Show Docker Model Runner disk usage +long: Show Docker Model Runner disk usage +usage: docker model df +pname: docker model +plink: docker_model.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_gateway.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_gateway.yaml new file mode 100644 index 00000000000..b6cf57a39bd --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_gateway.yaml @@ -0,0 +1,182 @@ +command: docker model gateway +short: Run an OpenAI-compatible LLM gateway +long: |- + `docker model gateway` starts a local OpenAI-compatible HTTP gateway that routes + requests to one or more configured LLM providers. It supports Docker Model Runner + as a first-class provider, alongside Ollama, OpenAI, Anthropic, Groq, Mistral, + Azure OpenAI, and many other OpenAI-compatible endpoints. + + The gateway is configured through a YAML file that declares the model list, + provider routing, load-balancing, retries, and fallbacks. + + ### Configuration file format + + ```yaml + model_list: + - model_name: <alias exposed to clients> + params: + model: <provider>/<upstream-model-name> + api_base: <optional base URL override> + api_key: <optional key or os.environ/VAR_NAME> + + general_settings: + master_key: <optional API key required by clients> + num_retries: <optional integer, default 0> + fallbacks: + - <primary-alias>: [<fallback-alias>, ...] + ``` + + The `model` field under `params` uses the format `provider/model-name`. + Supported provider prefixes include: `docker_model_runner`, `openai`, + `anthropic`, `ollama`, `groq`, `mistral`, `together_ai`, `deepseek`, + `fireworks_ai`, `openrouter`, `perplexity`, `xai`, `nvidia_nim`, + `cerebras`, `sambanova`, `deepinfra`, `azure`, `azure_ai`, `vllm`, + `lm_studio`, `huggingface`. + + API keys can be supplied inline, as `os.environ/VAR_NAME` references, or as + `${VAR_NAME}` references. The gateway resolves well-known environment variables + automatically (for example, `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`). 
+usage: docker model gateway +pname: docker model +plink: docker_model.yaml +options: + - option: config + shorthand: c + value_type: string + description: Path to the YAML configuration file + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: host + value_type: string + default_value: 0.0.0.0 + description: Host address to bind to + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: port + shorthand: p + value_type: uint16 + default_value: "4000" + description: Port to listen on + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: verbose + shorthand: v + value_type: bool + default_value: "false" + description: Enable verbose (debug) logging + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +examples: |- + ### Route requests to Docker Model Runner + + ```yaml + model_list: + - model_name: smollm2 + params: + model: docker_model_runner/ai/smollm2 + api_base: http://localhost:12434/engines/llama.cpp/v1 + ``` + + ```console + $ docker model gateway --config config.yaml + ``` + + The gateway starts on `http://0.0.0.0:4000`. Send requests using any + OpenAI-compatible client: + + ```console + $ curl http://localhost:4000/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "smollm2", + "messages": [{"role": "user", "content": "Hello"}] + }' + ``` + + ### Route requests to multiple providers with fallback + + ```yaml + model_list: + - model_name: fast + params: + model: groq/llama-3.1-8b-instant + api_key: os.environ/GROQ_API_KEY + - model_name: smart + params: + model: openai/gpt-4o + api_key: os.environ/OPENAI_API_KEY + - model_name: local + params: + model: docker_model_runner/ai/smollm2 + api_base: http://localhost:12434/engines/llama.cpp/v1 + + general_settings: + num_retries: 2 + fallbacks: + - fast: [local] + - smart: [fast, local] + ``` + + ```console + $ docker model gateway --config config.yaml --port 8080 + ``` + + ### Secure the gateway with an API key + + ```yaml + model_list: + - model_name: smollm2 + params: + model: docker_model_runner/ai/smollm2 + api_base: http://localhost:12434/engines/llama.cpp/v1 + + general_settings: + master_key: os.environ/GATEWAY_API_KEY + ``` + + ```console + $ GATEWAY_API_KEY=my-secret docker model gateway --config config.yaml + ``` + + Clients must then pass the key as a Bearer token or via the `x-api-key` header: + + ```console + $ curl http://localhost:4000/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{"model": "smollm2", "messages": [{"role": "user", "content": "Hi"}]}' + ``` + + ### Use a custom host and port + + ```console + $ docker model gateway --config config.yaml --host 127.0.0.1 --port 9000 + ``` + + ### Enable debug logging + + ```console + $ docker model gateway --config config.yaml --verbose + ``` +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_inspect.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_inspect.yaml new file mode 100644 index 00000000000..c422f021038 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_inspect.yaml @@ -0,0 +1,35 @@ +command: docker model 
inspect +short: Display detailed information on one model +long: Display detailed information on one model +usage: docker model inspect MODEL +pname: docker model +plink: docker_model.yaml +options: + - option: openai + value_type: bool + default_value: "false" + description: List model in an OpenAI format + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: remote + shorthand: r + value_type: bool + default_value: "false" + description: Show info for remote models + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_install-runner.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_install-runner.yaml new file mode 100644 index 00000000000..562fd6bbfbf --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_install-runner.yaml @@ -0,0 +1,123 @@ +command: docker model install-runner +short: Install Docker Model Runner (Docker Engine only) +long: | + This command runs implicitly when a docker model command is executed. You can run this command explicitly to add a new configuration. +usage: docker model install-runner +pname: docker model +plink: docker_model.yaml +options: + - option: backend + value_type: string + description: 'Specify backend (llama.cpp|vllm|diffusers). Default: llama.cpp' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: debug + value_type: bool + default_value: "false" + description: Enable debug logging + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: do-not-track + value_type: bool + default_value: "false" + description: Do not track models usage in Docker Model Runner + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: gpu + value_type: string + default_value: auto + description: Specify GPU support (none|auto|cuda|rocm|musa|cann) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: host + value_type: string + default_value: 127.0.0.1 + description: Host address to bind Docker Model Runner + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: port + value_type: uint16 + default_value: "0" + description: | + Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: proxy-cert + value_type: string + description: Path to a CA certificate file for proxy SSL inspection + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls + value_type: bool + default_value: "false" + description: Enable TLS/HTTPS for Docker Model Runner API + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls-cert + value_type: string + description: Path to TLS certificate file 
(auto-generated if not provided) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls-key + value_type: string + description: Path to TLS private key file (auto-generated if not provided) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls-port + value_type: uint16 + default_value: "0" + description: | + TLS port for Docker Model Runner (default: 12444 for Docker Engine, 12445 for Cloud mode) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_launch.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_launch.yaml new file mode 100644 index 00000000000..58f074ee023 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_launch.yaml @@ -0,0 +1,84 @@ +command: docker model launch +short: Launch an app configured to use Docker Model Runner +long: |- + Launch an app configured to use Docker Model Runner. + + Without arguments, lists all supported apps. + + Supported apps: anythingllm, claude, codex, openclaw, opencode, openwebui + + Examples: + docker model launch + docker model launch opencode + docker model launch claude -- --help + docker model launch openwebui --port 3000 + docker model launch claude --config +usage: docker model launch [APP] [-- APP_ARGS...] +pname: docker model +plink: docker_model.yaml +options: + - option: config + value_type: bool + default_value: "false" + description: Print configuration without launching + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: detach + value_type: bool + default_value: "false" + description: Run containerized app in background + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: dry-run + value_type: bool + default_value: "false" + description: Print what would be executed without running it + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: image + value_type: string + description: Override container image for containerized apps + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: model + value_type: string + description: Model to use (for opencode) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: port + value_type: int + default_value: "0" + description: Host port to expose (web UIs) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_list.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_list.yaml new file mode 100644 index 00000000000..242462f4c14 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_list.yaml @@ -0,0 +1,55 
@@ +command: docker model list +aliases: docker model list, docker model ls +short: List the models pulled to your local environment +long: List the models pulled to your local environment +usage: docker model list [OPTIONS] [MODEL] +pname: docker model +plink: docker_model.yaml +options: + - option: json + value_type: bool + default_value: "false" + description: List models in a JSON format + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: openai + value_type: bool + default_value: "false" + description: List models in an OpenAI format + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: openaiurl + value_type: string + description: OpenAI-compatible API endpoint URL to list models from + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: quiet + shorthand: q + value_type: bool + default_value: "false" + description: Only show model IDs + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_logs.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_logs.yaml new file mode 100644 index 00000000000..4f5daf177fe --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_logs.yaml @@ -0,0 +1,35 @@ +command: docker model logs +short: Fetch the Docker Model Runner logs +long: Fetch the Docker Model Runner logs +usage: docker model logs [OPTIONS] +pname: docker model +plink: docker_model.yaml +options: + - option: follow + shorthand: f + value_type: bool + default_value: "false" + description: View logs with real-time streaming + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: no-engines + value_type: bool + default_value: "false" + description: Exclude inference engine logs from the output + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_package.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_package.yaml new file mode 100644 index 00000000000..7bc696c5b7c --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_package.yaml @@ -0,0 +1,149 @@ +command: docker model package +short: Package a model into a Docker Model OCI artifact +long: |- + Package a model into a Docker Model OCI artifact. + + The model source must be one of: + --gguf A GGUF file (single file or first shard of a sharded model) + --safetensors-dir A directory containing .safetensors and configuration files + --dduf A .dduf (Diffusers Unified Format) archive + --from An existing packaged model reference + + By default, the packaged artifact is loaded into the local Model Runner content store. + Use --push to publish the model to a registry instead. + + MODEL specifies the target model reference (for example: myorg/llama3:8b). 
+ When using --push, MODEL must be a registry-qualified reference. + + Packaging behavior: + + GGUF + --gguf must point to a .gguf file. + For sharded models, point to the first shard. All shards must: + • reside in the same directory + • follow an indexed naming convention (e.g. model-00001-of-00015.gguf) + All shards are automatically discovered and packaged together. + + Safetensors + --safetensors-dir must point to a directory containing .safetensors files + and required configuration files (e.g. model config, tokenizer files). + All files under the directory (including nested subdirectories) are + automatically discovered. Each file is packaged as a separate OCI layer. + + DDUF + --dduf must point to a .dduf archive file. + + Repackaging + --from repackages an existing model. You may override selected properties + such as --context-size to create a variant of the original model. + + Multimodal models + Use --mmproj to include a multimodal projector file. +usage: docker model package (--gguf <path> | --safetensors-dir <path> | --dduf <path> | --from <model>) [--license <path>...] [--mmproj <path>] [--context-size <tokens>] [--push] MODEL +pname: docker model +plink: docker_model.yaml +options: + - option: chat-template + value_type: string + description: absolute path to chat template file (must be Jinja format) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: context-size + value_type: uint64 + default_value: "0" + description: context size in tokens + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: dduf + value_type: string + description: absolute path to DDUF archive file (Diffusers Unified Format) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: format + value_type: string + default_value: docker + description: | + output artifact format: "docker" (default) or "cncf" (CNCF ModelPack spec) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: from + value_type: string + description: reference to an existing model to repackage + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: gguf + value_type: string + description: absolute path to gguf file + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: license + shorthand: l + value_type: stringArray + default_value: '[]' + description: absolute path to a license file + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: mmproj + value_type: string + description: absolute path to multimodal projector file + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: push + value_type: bool + default_value: "false" + description: | + push to registry (if not set, the model is loaded into the Model Runner content store) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: safetensors-dir + value_type: string + description: absolute path to directory containing safetensors files and config + deprecated: false + hidden: false + experimental: false + 
experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_ps.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_ps.yaml new file mode 100644 index 00000000000..0f4cf1e63b7 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_ps.yaml @@ -0,0 +1,13 @@ +command: docker model ps +short: List running models +long: List running models +usage: docker model ps +pname: docker model +plink: docker_model.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_pull.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_pull.yaml new file mode 100644 index 00000000000..c939304581d --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_pull.yaml @@ -0,0 +1,33 @@ +command: docker model pull +short: Pull a model from Docker Hub or HuggingFace to your local environment +long: | + Pull a model to your local environment. Downloaded models also appear in the Docker Desktop Dashboard. +usage: docker model pull MODEL +pname: docker model +plink: docker_model.yaml +examples: |- + ### Pulling a model from Docker Hub + + ```console + docker model pull ai/smollm2 + ``` + + ### Pulling from HuggingFace + + You can pull GGUF models directly from [Hugging Face](https://huggingface.co/models?library=gguf). + + **Note about quantization:** If no tag is specified, the command tries to pull the `Q4_K_M` version of the model. + If `Q4_K_M` doesn't exist, the command pulls the first GGUF found in the **Files** view of the model on HuggingFace. 
+ To specify the quantization, provide it as a tag, for example: + `docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_S` + + ```console + docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF + ``` +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_purge.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_purge.yaml new file mode 100644 index 00000000000..425923b91d3 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_purge.yaml @@ -0,0 +1,25 @@ +command: docker model purge +short: Remove all models +long: Remove all models +usage: docker model purge [OPTIONS] +pname: docker model +plink: docker_model.yaml +options: + - option: force + shorthand: f + value_type: bool + default_value: "false" + description: Forcefully remove all models + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_push.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_push.yaml new file mode 100644 index 00000000000..65c9ddfbe64 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_push.yaml @@ -0,0 +1,13 @@ +command: docker model push +short: Push a model to Docker Hub or Hugging Face +long: Push a model to Docker Hub or Hugging Face +usage: docker model push MODEL +pname: docker model +plink: docker_model.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_reinstall-runner.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_reinstall-runner.yaml new file mode 100644 index 00000000000..35d328b5518 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_reinstall-runner.yaml @@ -0,0 +1,123 @@ +command: docker model reinstall-runner +short: Reinstall Docker Model Runner (Docker Engine only) +long: | + This command removes the existing Docker Model Runner container and reinstalls it with the specified configuration. Models and images are preserved during reinstallation. +usage: docker model reinstall-runner +pname: docker model +plink: docker_model.yaml +options: + - option: backend + value_type: string + description: 'Specify backend (llama.cpp|vllm|diffusers). 
Default: llama.cpp' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: debug + value_type: bool + default_value: "false" + description: Enable debug logging + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: do-not-track + value_type: bool + default_value: "false" + description: Do not track models usage in Docker Model Runner + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: gpu + value_type: string + default_value: auto + description: Specify GPU support (none|auto|cuda|rocm|musa|cann) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: host + value_type: string + default_value: 127.0.0.1 + description: Host address to bind Docker Model Runner + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: port + value_type: uint16 + default_value: "0" + description: | + Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: proxy-cert + value_type: string + description: Path to a CA certificate file for proxy SSL inspection + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls + value_type: bool + default_value: "false" + description: Enable TLS/HTTPS for Docker Model Runner API + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls-cert + value_type: string + description: Path to TLS certificate file (auto-generated if not provided) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls-key + value_type: string + description: Path to TLS private key file (auto-generated if not provided) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls-port + value_type: uint16 + default_value: "0" + description: | + TLS port for Docker Model Runner (default: 12444 for Docker Engine, 12445 for Cloud mode) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_requests.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_requests.yaml new file mode 100644 index 00000000000..eecab3ba360 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_requests.yaml @@ -0,0 +1,45 @@ +command: docker model requests +short: Fetch requests+responses from Docker Model Runner +long: Fetch requests+responses from Docker Model Runner +usage: docker model requests [OPTIONS] +pname: docker model +plink: docker_model.yaml +options: + - option: follow + shorthand: f + value_type: bool + default_value: "false" + description: Follow requests stream + deprecated: false + hidden: false + experimental: false + experimentalcli: false + 
kubernetes: false + swarm: false + - option: include-existing + value_type: bool + default_value: "false" + description: | + Include existing requests when starting to follow (only available with --follow) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: model + value_type: string + description: Specify the model to filter requests + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_restart-runner.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_restart-runner.yaml new file mode 100644 index 00000000000..652fb64b080 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_restart-runner.yaml @@ -0,0 +1,77 @@ +command: docker model restart-runner +short: Restart Docker Model Runner (Docker Engine only) +long: |- + This command restarts the Docker Model Runner without pulling container images. Use this command to restart the runner when you already have the required images locally. + + For the first-time setup or to ensure you have the latest images, use `docker model install-runner` instead. +usage: docker model restart-runner +pname: docker model +plink: docker_model.yaml +options: + - option: debug + value_type: bool + default_value: "false" + description: Enable debug logging + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: do-not-track + value_type: bool + default_value: "false" + description: Do not track models usage in Docker Model Runner + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: gpu + value_type: string + default_value: auto + description: Specify GPU support (none|auto|cuda|rocm|musa|cann) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: host + value_type: string + default_value: 127.0.0.1 + description: Host address to bind Docker Model Runner + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: port + value_type: uint16 + default_value: "0" + description: | + Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: proxy-cert + value_type: string + description: Path to a CA certificate file for proxy SSL inspection + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_rm.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_rm.yaml new file mode 100644 index 00000000000..1a1028dc8a7 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_rm.yaml @@ -0,0 +1,25 @@ +command: docker model rm +short: Remove local models downloaded from Docker Hub +long: 
Remove local models downloaded from Docker Hub +usage: docker model rm [MODEL...] +pname: docker model +plink: docker_model.yaml +options: + - option: force + shorthand: f + value_type: bool + default_value: "false" + description: Forcefully remove the model + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_run.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_run.yaml new file mode 100644 index 00000000000..9ffb3785c54 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_run.yaml @@ -0,0 +1,103 @@ +command: docker model run +short: Run a model and interact with it using a submitted prompt or chat mode +long: |- + When you run a model, Docker calls an inference server API endpoint hosted by the Model Runner through Docker Desktop. The model stays in memory until another model is requested, or until a pre-defined inactivity timeout is reached (currently 5 minutes). + + You do not have to use Docker model run before interacting with a specific model from a host process or from within a container. Model Runner transparently loads the requested model on-demand, assuming it has been pulled and is locally available. + + You can also use chat mode in the Docker Desktop Dashboard when you select the model in the **Models** tab. +usage: docker model run MODEL [PROMPT] +pname: docker model +plink: docker_model.yaml +options: + - option: color + value_type: string + default_value: "no" + description: Use colored output (auto|yes|no) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: debug + value_type: bool + default_value: "false" + description: Enable debug logging + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: detach + shorthand: d + value_type: bool + default_value: "false" + description: Load the model in the background without interaction + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: openaiurl + value_type: string + description: OpenAI-compatible API endpoint URL to chat with + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: websearch + value_type: bool + default_value: "false" + description: Enable web search tool during chat + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +examples: |- + ### One-time prompt + + ```console + docker model run ai/smollm2 "Hi" + ``` + + Output: + + ```console + Hello! How can I assist you today? + ``` + + ### Interactive chat + + ```console + docker model run ai/smollm2 + ``` + + Output: + + ```console + > Hi + Hi there! It's SmolLM, AI assistant. How can I help you today? + > /bye + ``` + + ### Pre-load a model + + ```console + docker model run --detach ai/smollm2 + ``` + + This loads the model into memory without interaction, ensuring maximum performance for subsequent requests. 
+deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_search.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_search.yaml new file mode 100644 index 00000000000..72129bd7a32 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_search.yaml @@ -0,0 +1,57 @@ +command: docker model search +short: Search for models on Docker Hub and HuggingFace +long: |- + Search for models from Docker Hub (ai/ namespace) and HuggingFace. + + When no search term is provided, lists all available models. + When a search term is provided, filters models by name/description. + + Examples: + docker model search # List available models from Docker Hub + docker model search llama # Search for models containing "llama" + docker model search --source=all # Search both Docker Hub and HuggingFace + docker model search --source=huggingface # Only search HuggingFace + docker model search --limit=50 phi # Search with custom limit + docker model search --json llama # Output as JSON +usage: docker model search [OPTIONS] [TERM] +pname: docker model +plink: docker_model.yaml +options: + - option: json + value_type: bool + default_value: "false" + description: Output results as JSON + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: limit + shorthand: "n" + value_type: int + default_value: "32" + description: Maximum number of results to show + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: source + value_type: string + default_value: all + description: 'Source to search: all, dockerhub, huggingface' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_show.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_show.yaml new file mode 100644 index 00000000000..c119856732f --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_show.yaml @@ -0,0 +1,25 @@ +command: docker model show +short: Show information for a model +long: Display detailed information about a model in a human-readable format. 
+usage: docker model show MODEL +pname: docker model +plink: docker_model.yaml +options: + - option: remote + shorthand: r + value_type: bool + default_value: "false" + description: Show info for remote models + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_skills.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_skills.yaml new file mode 100644 index 00000000000..27274b18b0f --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_skills.yaml @@ -0,0 +1,79 @@ +command: docker model skills +short: Install Docker Model Runner skills for AI coding assistants +long: |- + Install Docker Model Runner skills for AI coding assistants. + + Skills are configuration files that help AI coding assistants understand + how to use Docker Model Runner effectively for local model inference. + + Supported targets: + --codex Install to ~/.codex/skills (OpenAI Codex CLI) + --claude Install to ~/.claude/skills (Claude Code) + --opencode Install to ~/.config/opencode/skills (OpenCode) + --dest Install to a custom directory + + Example: + docker model skills --claude + docker model skills --codex --claude + docker model skills --dest /path/to/skills +usage: docker model skills +pname: docker model +plink: docker_model.yaml +options: + - option: claude + value_type: bool + default_value: "false" + description: Install skills for Claude Code (~/.claude/skills) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: codex + value_type: bool + default_value: "false" + description: Install skills for OpenAI Codex CLI (~/.codex/skills) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: dest + value_type: string + description: Install skills to a custom directory + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: force + shorthand: f + value_type: bool + default_value: "false" + description: Overwrite existing skills without prompting + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: opencode + value_type: bool + default_value: "false" + description: Install skills for OpenCode (~/.config/opencode/skills) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_start-runner.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_start-runner.yaml new file mode 100644 index 00000000000..740e36c53a1 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_start-runner.yaml @@ -0,0 +1,125 @@ +command: docker model start-runner +short: Start Docker Model Runner (Docker Engine only) +long: |- + This command starts the Docker Model Runner without pulling container images. Use this command to start the runner when you already have the required images locally. 
+ + For the first-time setup or to ensure you have the latest images, use `docker model install-runner` instead. +usage: docker model start-runner +pname: docker model +plink: docker_model.yaml +options: + - option: backend + value_type: string + description: 'Specify backend (llama.cpp|vllm|diffusers). Default: llama.cpp' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: debug + value_type: bool + default_value: "false" + description: Enable debug logging + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: do-not-track + value_type: bool + default_value: "false" + description: Do not track models usage in Docker Model Runner + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: gpu + value_type: string + default_value: auto + description: Specify GPU support (none|auto|cuda|rocm|musa|cann) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: host + value_type: string + default_value: 127.0.0.1 + description: Host address to bind Docker Model Runner + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: port + value_type: uint16 + default_value: "0" + description: | + Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: proxy-cert + value_type: string + description: Path to a CA certificate file for proxy SSL inspection + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls + value_type: bool + default_value: "false" + description: Enable TLS/HTTPS for Docker Model Runner API + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls-cert + value_type: string + description: Path to TLS certificate file (auto-generated if not provided) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls-key + value_type: string + description: Path to TLS private key file (auto-generated if not provided) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tls-port + value_type: uint16 + default_value: "0" + description: | + TLS port for Docker Model Runner (default: 12444 for Docker Engine, 12445 for Cloud mode) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_status.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_status.yaml new file mode 100644 index 00000000000..8f0b789f85b --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_status.yaml @@ -0,0 +1,25 @@ +command: docker model status +short: Check if the Docker Model Runner is running +long: | + Check whether the Docker Model Runner is running and 
displays the current inference engine. +usage: docker model status +pname: docker model +plink: docker_model.yaml +options: + - option: json + value_type: bool + default_value: "false" + description: Format output in JSON + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_stop-runner.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_stop-runner.yaml new file mode 100644 index 00000000000..8d565b86819 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_stop-runner.yaml @@ -0,0 +1,27 @@ +command: docker model stop-runner +short: Stop Docker Model Runner (Docker Engine only) +long: |- + This command stops the Docker Model Runner by removing the running containers, but preserves the container images on disk. Use this command when you want to temporarily stop the runner but plan to start it again later. + + To completely remove the runner including images, use `docker model uninstall-runner --images` instead. +usage: docker model stop-runner +pname: docker model +plink: docker_model.yaml +options: + - option: models + value_type: bool + default_value: "false" + description: Remove model storage volume + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_tag.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_tag.yaml new file mode 100644 index 00000000000..6239e613875 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_tag.yaml @@ -0,0 +1,14 @@ +command: docker model tag +short: Tag a model +long: | + Specify a particular version or variant of the model. If no tag is provided, Docker defaults to `latest`. +usage: docker model tag SOURCE TARGET +pname: docker model +plink: docker_model.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_uninstall-runner.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_uninstall-runner.yaml new file mode 100644 index 00000000000..e93363d5381 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_uninstall-runner.yaml @@ -0,0 +1,43 @@ +command: docker model uninstall-runner +short: Uninstall Docker Model Runner (Docker Engine only) +long: Uninstall Docker Model Runner (Docker Engine only) +usage: docker model uninstall-runner +pname: docker model +plink: docker_model.yaml +options: + - option: backend + value_type: string + description: Uninstall a deferred backend (e.g. 
vllm, diffusers) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: images + value_type: bool + default_value: "false" + description: Remove docker/model-runner images + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: models + value_type: bool + default_value: "false" + description: Remove model storage volume + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_unload.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_unload.yaml new file mode 100644 index 00000000000..ee0de45bb32 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_unload.yaml @@ -0,0 +1,34 @@ +command: docker model unload +aliases: docker model unload, docker model stop +short: Unload running models +long: Unload running models +usage: docker model unload (MODEL [MODEL ...] [--backend BACKEND] | --all) +pname: docker model +plink: docker_model.yaml +options: + - option: all + value_type: bool + default_value: "false" + description: Unload all running models + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: backend + value_type: string + description: Optional backend to target + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_version.yaml b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_version.yaml new file mode 100644 index 00000000000..2674843c99e --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/docker_model_version.yaml @@ -0,0 +1,13 @@ +command: docker model version +short: Show the Docker Model Runner version +long: Show the Docker Model Runner version +usage: docker model version +pname: docker model +plink: docker_model.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model.md new file mode 100644 index 00000000000..e26c01924be --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model.md @@ -0,0 +1,47 @@ +# docker model + +<!---MARKER_GEN_START--> +Docker Model Runner + +### Subcommands + +| Name | Description | +|:------------------------------------------------|:-----------------------------------------------------------------------| +| [`bench`](model_bench.md) | Benchmark a model's performance at different concurrency levels | +| [`context`](model_context.md) | Manage Docker Model Runner contexts | +| [`df`](model_df.md) | Show Docker Model Runner disk usage | +| [`gateway`](model_gateway.md) | Run an OpenAI-compatible LLM gateway | +| [`inspect`](model_inspect.md) | Display detailed information on one model | +| [`install-runner`](model_install-runner.md) | Install Docker 
Model Runner (Docker Engine only) | +| [`launch`](model_launch.md) | Launch an app configured to use Docker Model Runner | +| [`list`](model_list.md) | List the models pulled to your local environment | +| [`logs`](model_logs.md) | Fetch the Docker Model Runner logs | +| [`package`](model_package.md) | Package a model into a Docker Model OCI artifact | +| [`ps`](model_ps.md) | List running models | +| [`pull`](model_pull.md) | Pull a model from Docker Hub or HuggingFace to your local environment | +| [`purge`](model_purge.md) | Remove all models | +| [`push`](model_push.md) | Push a model to Docker Hub or Hugging Face | +| [`reinstall-runner`](model_reinstall-runner.md) | Reinstall Docker Model Runner (Docker Engine only) | +| [`requests`](model_requests.md) | Fetch requests+responses from Docker Model Runner | +| [`restart-runner`](model_restart-runner.md) | Restart Docker Model Runner (Docker Engine only) | +| [`rm`](model_rm.md) | Remove local models downloaded from Docker Hub | +| [`run`](model_run.md) | Run a model and interact with it using a submitted prompt or chat mode | +| [`search`](model_search.md) | Search for models on Docker Hub and HuggingFace | +| [`show`](model_show.md) | Show information for a model | +| [`skills`](model_skills.md) | Install Docker Model Runner skills for AI coding assistants | +| [`start-runner`](model_start-runner.md) | Start Docker Model Runner (Docker Engine only) | +| [`status`](model_status.md) | Check if the Docker Model Runner is running | +| [`stop-runner`](model_stop-runner.md) | Stop Docker Model Runner (Docker Engine only) | +| [`tag`](model_tag.md) | Tag a model | +| [`uninstall-runner`](model_uninstall-runner.md) | Uninstall Docker Model Runner (Docker Engine only) | +| [`unload`](model_unload.md) | Unload running models | +| [`version`](model_version.md) | Show the Docker Model Runner version | + + + +<!---MARKER_GEN_END--> + +## Description + +Use Docker Model Runner to run and interact with AI models directly from the command line. +For more information, see the [documentation](https://docs.docker.com/ai/model-runner/) diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_bench.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_bench.md new file mode 100644 index 00000000000..fbeaf5ac535 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_bench.md @@ -0,0 +1,21 @@ +# docker model bench + +<!---MARKER_GEN_START--> +Benchmark a model's performance showing tokens per second at different concurrency levels. + +This command runs a series of benchmarks with 1, 2, 4, and 8 concurrent requests by default, +measuring the tokens per second (TPS) that the model can generate. 
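As an illustration of the options listed below, a short benchmark pass might look like the following sketch. This is illustrative only and not part of the generated reference: `ai/smollm2` is a placeholder model, and the command is assumed to take a model reference as its argument.

```console
$ docker model bench ai/smollm2 --duration 10s --concurrency 1,4
```

Here `--duration 10s` shortens each concurrency pass and `--concurrency 1,4` restricts the test to two levels instead of the default `[1,2,4,8]`.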
+ +### Options + +| Name | Type | Default | Description | +|:----------------|:-----------|:--------------------------------------------------------------------------------|:--------------------------------------| +| `--concurrency` | `intSlice` | `[1,2,4,8]` | Concurrency levels to test | +| `--duration` | `duration` | `30s` | Duration to run each concurrency test | +| `--json` | `bool` | | Output results in JSON format | +| `--prompt` | `string` | `Write a comprehensive 100 word summary on whales and their impact on society.` | Prompt to use for benchmarking | +| `--timeout` | `duration` | `5m0s` | Timeout for each individual request | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_configure.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_configure.md new file mode 100644 index 00000000000..81fc1546bd5 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_configure.md @@ -0,0 +1,14 @@ +# docker model configure + +<!---MARKER_GEN_START--> +Configure runtime options for a model + +### Options + +| Name | Type | Default | Description | +|:-----------------|:--------|:--------|:-------------------------| +| `--context-size` | `int64` | `-1` | context size (in tokens) | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context.md new file mode 100644 index 00000000000..d5c05658ce3 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context.md @@ -0,0 +1,19 @@ +# docker model context + +<!---MARKER_GEN_START--> +Manage Docker Model Runner contexts + +### Subcommands + +| Name | Description | +|:--------------------------------------|:--------------------------------------------------------| +| [`create`](model_context_create.md) | Create a named Model Runner context | +| [`inspect`](model_context_inspect.md) | Display detailed information about one or more contexts | +| [`ls`](model_context_ls.md) | List Model Runner contexts | +| [`rm`](model_context_rm.md) | Remove one or more Model Runner contexts | +| [`use`](model_context_use.md) | Set the active Model Runner context | + + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_create.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_create.md new file mode 100644 index 00000000000..cee0c633827 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_create.md @@ -0,0 +1,18 @@ +# docker model context create + +<!---MARKER_GEN_START--> +Create a named Model Runner context + +### Options + +| Name | Type | Default | Description | +|:--------------------|:---------|:--------|:--------------------------------------------------------------| +| `--description` | `string` | | Optional human-readable description for this context | +| `--host` | `string` | | Model Runner API base URL (e.g. 
http://192.168.1.100:12434) | +| `--tls` | `bool` | | Enable TLS for connections to this context | +| `--tls-ca-cert` | `string` | | Path to a custom CA certificate PEM file for TLS verification | +| `--tls-skip-verify` | `bool` | | Skip TLS server certificate verification | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_inspect.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_inspect.md new file mode 100644 index 00000000000..75357d4a1d6 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_inspect.md @@ -0,0 +1,8 @@ +# docker model context inspect + +<!---MARKER_GEN_START--> +Display detailed information about one or more contexts + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_ls.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_ls.md new file mode 100644 index 00000000000..5d9e980d85d --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_ls.md @@ -0,0 +1,12 @@ +# docker model context ls + +<!---MARKER_GEN_START--> +List Model Runner contexts + +### Aliases + +`docker model context ls`, `docker model context list` + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_rm.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_rm.md new file mode 100644 index 00000000000..65a408590f8 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_rm.md @@ -0,0 +1,12 @@ +# docker model context rm + +<!---MARKER_GEN_START--> +Remove one or more Model Runner contexts + +### Aliases + +`docker model context rm`, `docker model context remove` + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_use.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_use.md new file mode 100644 index 00000000000..f6544f63fd1 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_context_use.md @@ -0,0 +1,9 @@ +# docker model context use + +<!---MARKER_GEN_START--> +Set the active Model Runner context. Pass "default" to revert to +automatic backend detection. + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_df.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_df.md new file mode 100644 index 00000000000..e6a4073670b --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_df.md @@ -0,0 +1,8 @@ +# docker model df + +<!---MARKER_GEN_START--> +Show Docker Model Runner disk usage + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_gateway.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_gateway.md new file mode 100644 index 00000000000..630ef5ecc96 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_gateway.md @@ -0,0 +1,150 @@ +# docker model gateway + +<!---MARKER_GEN_START--> +Run an OpenAI-compatible LLM gateway that routes requests to configured providers. + +Supported providers include Docker Model Runner, Ollama, OpenAI, Anthropic, +Groq, Mistral, Azure OpenAI, and many more OpenAI-compatible endpoints. 
+ +### Options + +| Name | Type | Default | Description | +|:------------------|:---------|:----------|:------------------------------------| +| `-c`, `--config` | `string` | | Path to the YAML configuration file | +| `--host` | `string` | `0.0.0.0` | Host address to bind to | +| `-p`, `--port` | `uint16` | `4000` | Port to listen on | +| `-v`, `--verbose` | `bool` | | Enable verbose (debug) logging | + + +<!---MARKER_GEN_END--> + +## Description + +`docker model gateway` starts a local OpenAI-compatible HTTP gateway that routes +requests to one or more configured LLM providers. It supports Docker Model Runner +as a first-class provider, alongside Ollama, OpenAI, Anthropic, Groq, Mistral, +Azure OpenAI, and many other OpenAI-compatible endpoints. + +The gateway is configured through a YAML file that declares the model list, +provider routing, load-balancing, retries, and fallbacks. + +### Configuration file format + +```yaml +model_list: + - model_name: <alias exposed to clients> + params: + model: <provider>/<upstream-model-name> + api_base: <optional base URL override> + api_key: <optional key or os.environ/VAR_NAME> + +general_settings: + master_key: <optional API key required by clients> + num_retries: <optional integer, default 0> + fallbacks: + - <primary-alias>: [<fallback-alias>, ...] +``` + +The `model` field under `params` uses the format `provider/model-name`. +Supported provider prefixes include: `docker_model_runner`, `openai`, +`anthropic`, `ollama`, `groq`, `mistral`, `together_ai`, `deepseek`, +`fireworks_ai`, `openrouter`, `perplexity`, `xai`, `nvidia_nim`, +`cerebras`, `sambanova`, `deepinfra`, `azure`, `azure_ai`, `vllm`, +`lm_studio`, `huggingface`. + +API keys can be supplied inline, as `os.environ/VAR_NAME` references, or as +`${VAR_NAME}` references. The gateway resolves well-known environment variables +automatically (for example, `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`). + +## Examples + +### Route requests to Docker Model Runner + +```yaml +model_list: + - model_name: smollm2 + params: + model: docker_model_runner/ai/smollm2 + api_base: http://localhost:12434/engines/llama.cpp/v1 +``` + +```console +$ docker model gateway --config config.yaml +``` + +The gateway starts on `http://0.0.0.0:4000`. 
Send requests using any +OpenAI-compatible client: + +```console +$ curl http://localhost:4000/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "smollm2", + "messages": [{"role": "user", "content": "Hello"}] + }' +``` + +### Route requests to multiple providers with fallback + +```yaml +model_list: + - model_name: fast + params: + model: groq/llama-3.1-8b-instant + api_key: os.environ/GROQ_API_KEY + - model_name: smart + params: + model: openai/gpt-4o + api_key: os.environ/OPENAI_API_KEY + - model_name: local + params: + model: docker_model_runner/ai/smollm2 + api_base: http://localhost:12434/engines/llama.cpp/v1 + +general_settings: + num_retries: 2 + fallbacks: + - fast: [local] + - smart: [fast, local] +``` + +```console +$ docker model gateway --config config.yaml --port 8080 +``` + +### Secure the gateway with an API key + +```yaml +model_list: + - model_name: smollm2 + params: + model: docker_model_runner/ai/smollm2 + api_base: http://localhost:12434/engines/llama.cpp/v1 + +general_settings: + master_key: os.environ/GATEWAY_API_KEY +``` + +```console +$ GATEWAY_API_KEY=my-secret docker model gateway --config config.yaml +``` + +Clients must then pass the key as a Bearer token or via the `x-api-key` header: + +```console +$ curl http://localhost:4000/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{"model": "smollm2", "messages": [{"role": "user", "content": "Hi"}]}' +``` + +### Use a custom host and port + +```console +$ docker model gateway --config config.yaml --host 127.0.0.1 --port 9000 +``` + +### Enable debug logging + +```console +$ docker model gateway --config config.yaml --verbose +``` diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_inspect.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_inspect.md new file mode 100644 index 00000000000..7df01509381 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_inspect.md @@ -0,0 +1,15 @@ +# docker model inspect + +<!---MARKER_GEN_START--> +Display detailed information on one model + +### Options + +| Name | Type | Default | Description | +|:-----------------|:-------|:--------|:-------------------------------| +| `--openai` | `bool` | | List model in an OpenAI format | +| `-r`, `--remote` | `bool` | | Show info for remote models | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_install-runner.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_install-runner.md new file mode 100644 index 00000000000..de40a502865 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_install-runner.md @@ -0,0 +1,27 @@ +# docker model install-runner + +<!---MARKER_GEN_START--> +Install Docker Model Runner (Docker Engine only) + +### Options + +| Name | Type | Default | Description | +|:-----------------|:---------|:------------|:-------------------------------------------------------------------------------------------------------| +| `--backend` | `string` | | Specify backend (llama.cpp\|vllm\|diffusers). 
Default: llama.cpp | +| `--debug` | `bool` | | Enable debug logging | +| `--do-not-track` | `bool` | | Do not track models usage in Docker Model Runner | +| `--gpu` | `string` | `auto` | Specify GPU support (none\|auto\|cuda\|rocm\|musa\|cann) | +| `--host` | `string` | `127.0.0.1` | Host address to bind Docker Model Runner | +| `--port` | `uint16` | `0` | Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) | +| `--proxy-cert` | `string` | | Path to a CA certificate file for proxy SSL inspection | +| `--tls` | `bool` | | Enable TLS/HTTPS for Docker Model Runner API | +| `--tls-cert` | `string` | | Path to TLS certificate file (auto-generated if not provided) | +| `--tls-key` | `string` | | Path to TLS private key file (auto-generated if not provided) | +| `--tls-port` | `uint16` | `0` | TLS port for Docker Model Runner (default: 12444 for Docker Engine, 12445 for Cloud mode) | + + +<!---MARKER_GEN_END--> + +## Description + + This command runs implicitly when a docker model command is executed. You can run this command explicitly to add a new configuration. diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_launch.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_launch.md new file mode 100644 index 00000000000..161e2a3a306 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_launch.md @@ -0,0 +1,30 @@ +# docker model launch + +<!---MARKER_GEN_START--> +Launch an app configured to use Docker Model Runner. + +Without arguments, lists all supported apps. + +Supported apps: anythingllm, claude, codex, openclaw, opencode, openwebui + +Examples: + docker model launch + docker model launch opencode + docker model launch claude -- --help + docker model launch openwebui --port 3000 + docker model launch claude --config + +### Options + +| Name | Type | Default | Description | +|:------------|:---------|:--------|:------------------------------------------------| +| `--config` | `bool` | | Print configuration without launching | +| `--detach` | `bool` | | Run containerized app in background | +| `--dry-run` | `bool` | | Print what would be executed without running it | +| `--image` | `string` | | Override container image for containerized apps | +| `--model` | `string` | | Model to use (for opencode) | +| `--port` | `int` | `0` | Host port to expose (web UIs) | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_list.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_list.md new file mode 100644 index 00000000000..24d260a5d86 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_list.md @@ -0,0 +1,21 @@ +# docker model list + +<!---MARKER_GEN_START--> +List the models pulled to your local environment + +### Aliases + +`docker model list`, `docker model ls` + +### Options + +| Name | Type | Default | Description | +|:----------------|:---------|:--------|:-------------------------------------------------------| +| `--json` | `bool` | | List models in a JSON format | +| `--openai` | `bool` | | List models in an OpenAI format | +| `--openaiurl` | `string` | | OpenAI-compatible API endpoint URL to list models from | +| `-q`, `--quiet` | `bool` | | Only show model IDs | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_logs.md 
b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_logs.md new file mode 100644 index 00000000000..8c5810924a1 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_logs.md @@ -0,0 +1,15 @@ +# docker model logs + +<!---MARKER_GEN_START--> +Fetch the Docker Model Runner logs + +### Options + +| Name | Type | Default | Description | +|:-----------------|:-------|:--------|:----------------------------------------------| +| `-f`, `--follow` | `bool` | | View logs with real-time streaming | +| `--no-engines` | `bool` | | Exclude inference engine logs from the output | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_package.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_package.md new file mode 100644 index 00000000000..571b77c1fa3 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_package.md @@ -0,0 +1,60 @@ +# docker model package + +<!---MARKER_GEN_START--> +Package a model into a Docker Model OCI artifact. + +The model source must be one of: + --gguf A GGUF file (single file or first shard of a sharded model) + --safetensors-dir A directory containing .safetensors and configuration files + --dduf A .dduf (Diffusers Unified Format) archive + --from An existing packaged model reference + +By default, the packaged artifact is loaded into the local Model Runner content store. +Use --push to publish the model to a registry instead. + +MODEL specifies the target model reference (for example: myorg/llama3:8b). +When using --push, MODEL must be a registry-qualified reference. + +Packaging behavior: + + GGUF + --gguf must point to a .gguf file. + For sharded models, point to the first shard. All shards must: + • reside in the same directory + • follow an indexed naming convention (e.g. model-00001-of-00015.gguf) + All shards are automatically discovered and packaged together. + + Safetensors + --safetensors-dir must point to a directory containing .safetensors files + and required configuration files (e.g. model config, tokenizer files). + All files under the directory (including nested subdirectories) are + automatically discovered. Each file is packaged as a separate OCI layer. + + DDUF + --dduf must point to a .dduf archive file. + + Repackaging + --from repackages an existing model. You may override selected properties + such as --context-size to create a variant of the original model. + + Multimodal models + Use --mmproj to include a multimodal projector file. 
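To make the packaging flow above concrete, a minimal invocation might look like the following sketch. The file path and the `myorg/mymodel:q4` reference are placeholders, not values taken from the reference itself.

```console
$ docker model package --gguf /absolute/path/to/model.gguf myorg/mymodel:q4
$ docker model package --gguf /absolute/path/to/model.gguf --push myorg/mymodel:q4
```

The first form loads the artifact into the local Model Runner content store; adding `--push` publishes it to a registry instead, in which case the target must be a registry-qualified reference.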
+ +### Options + +| Name | Type | Default | Description | +|:--------------------|:--------------|:---------|:---------------------------------------------------------------------------------------| +| `--chat-template` | `string` | | absolute path to chat template file (must be Jinja format) | +| `--context-size` | `uint64` | `0` | context size in tokens | +| `--dduf` | `string` | | absolute path to DDUF archive file (Diffusers Unified Format) | +| `--format` | `string` | `docker` | output artifact format: "docker" (default) or "cncf" (CNCF ModelPack spec) | +| `--from` | `string` | | reference to an existing model to repackage | +| `--gguf` | `string` | | absolute path to gguf file | +| `-l`, `--license` | `stringArray` | | absolute path to a license file | +| `--mmproj` | `string` | | absolute path to multimodal projector file | +| `--push` | `bool` | | push to registry (if not set, the model is loaded into the Model Runner content store) | +| `--safetensors-dir` | `string` | | absolute path to directory containing safetensors files and config | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_ps.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_ps.md new file mode 100644 index 00000000000..15f5371553f --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_ps.md @@ -0,0 +1,8 @@ +# docker model ps + +<!---MARKER_GEN_START--> +List running models + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_pull.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_pull.md new file mode 100644 index 00000000000..246cc59d78a --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_pull.md @@ -0,0 +1,32 @@ +# docker model pull + +<!---MARKER_GEN_START--> +Pull a model from Docker Hub or HuggingFace to your local environment + + +<!---MARKER_GEN_END--> + +## Description + +Pull a model to your local environment. Downloaded models also appear in the Docker Desktop Dashboard. + +## Examples + +### Pulling a model from Docker Hub + +```console +docker model pull ai/smollm2 +``` + +### Pulling from HuggingFace + +You can pull GGUF models directly from [Hugging Face](https://huggingface.co/models?library=gguf). + +**Note about quantization:** If no tag is specified, the command tries to pull the `Q4_K_M` version of the model. +If `Q4_K_M` doesn't exist, the command pulls the first GGUF found in the **Files** view of the model on HuggingFace. 
+To specify the quantization, provide it as a tag, for example: +`docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_S` + +```console +docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF +``` diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_purge.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_purge.md new file mode 100644 index 00000000000..4fcc85c349d --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_purge.md @@ -0,0 +1,14 @@ +# docker model purge + +<!---MARKER_GEN_START--> +Remove all models + +### Options + +| Name | Type | Default | Description | +|:----------------|:-------|:--------|:-----------------------------| +| `-f`, `--force` | `bool` | | Forcefully remove all models | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_push.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_push.md new file mode 100644 index 00000000000..7b040fe0bf8 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_push.md @@ -0,0 +1,13 @@ +# docker model push + +<!---MARKER_GEN_START--> +Push a model to Docker Hub or Hugging Face + + +<!---MARKER_GEN_END--> + +### Example + +```console +docker model push <namespace>/<model> +``` diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_reinstall-runner.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_reinstall-runner.md new file mode 100644 index 00000000000..457b322e578 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_reinstall-runner.md @@ -0,0 +1,27 @@ +# docker model reinstall-runner + +<!---MARKER_GEN_START--> +Reinstall Docker Model Runner (Docker Engine only) + +### Options + +| Name | Type | Default | Description | +|:-----------------|:---------|:------------|:-------------------------------------------------------------------------------------------------------| +| `--backend` | `string` | | Specify backend (llama.cpp\|vllm\|diffusers). Default: llama.cpp | +| `--debug` | `bool` | | Enable debug logging | +| `--do-not-track` | `bool` | | Do not track models usage in Docker Model Runner | +| `--gpu` | `string` | `auto` | Specify GPU support (none\|auto\|cuda\|rocm\|musa\|cann) | +| `--host` | `string` | `127.0.0.1` | Host address to bind Docker Model Runner | +| `--port` | `uint16` | `0` | Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) | +| `--proxy-cert` | `string` | | Path to a CA certificate file for proxy SSL inspection | +| `--tls` | `bool` | | Enable TLS/HTTPS for Docker Model Runner API | +| `--tls-cert` | `string` | | Path to TLS certificate file (auto-generated if not provided) | +| `--tls-key` | `string` | | Path to TLS private key file (auto-generated if not provided) | +| `--tls-port` | `uint16` | `0` | TLS port for Docker Model Runner (default: 12444 for Docker Engine, 12445 for Cloud mode) | + + +<!---MARKER_GEN_END--> + +## Description + +This command removes the existing Docker Model Runner container and reinstalls it with the specified configuration. Models and images are preserved during reinstallation. 
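For example, a reinstall that switches the runner to a different backend and GPU mode could look like this sketch (the flag values are illustrative, chosen from the documented option sets):

```console
$ docker model reinstall-runner --backend vllm --gpu cuda
```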
diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_requests.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_requests.md new file mode 100644 index 00000000000..970dc3c3d6e --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_requests.md @@ -0,0 +1,16 @@ +# docker model requests + +<!---MARKER_GEN_START--> +Fetch requests+responses from Docker Model Runner + +### Options + +| Name | Type | Default | Description | +|:---------------------|:---------|:--------|:---------------------------------------------------------------------------------| +| `-f`, `--follow` | `bool` | | Follow requests stream | +| `--include-existing` | `bool` | | Include existing requests when starting to follow (only available with --follow) | +| `--model` | `string` | | Specify the model to filter requests | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_restart-runner.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_restart-runner.md new file mode 100644 index 00000000000..80565a8dfa1 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_restart-runner.md @@ -0,0 +1,24 @@ +# docker model restart-runner + +<!---MARKER_GEN_START--> +Restart Docker Model Runner (Docker Engine only) + +### Options + +| Name | Type | Default | Description | +|:-----------------|:---------|:------------|:-------------------------------------------------------------------------------------------------------| +| `--debug` | `bool` | | Enable debug logging | +| `--do-not-track` | `bool` | | Do not track models usage in Docker Model Runner | +| `--gpu` | `string` | `auto` | Specify GPU support (none\|auto\|cuda\|rocm\|musa\|cann) | +| `--host` | `string` | `127.0.0.1` | Host address to bind Docker Model Runner | +| `--port` | `uint16` | `0` | Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) | +| `--proxy-cert` | `string` | | Path to a CA certificate file for proxy SSL inspection | + + +<!---MARKER_GEN_END--> + +## Description + +This command restarts the Docker Model Runner without pulling container images. Use this command to restart the runner when you already have the required images locally. + +For the first-time setup or to ensure you have the latest images, use `docker model install-runner` instead. 
diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_rm.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_rm.md new file mode 100644 index 00000000000..6463903bd89 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_rm.md @@ -0,0 +1,14 @@ +# docker model rm + +<!---MARKER_GEN_START--> +Remove local models downloaded from Docker Hub + +### Options + +| Name | Type | Default | Description | +|:----------------|:-------|:--------|:----------------------------| +| `-f`, `--force` | `bool` | | Forcefully remove the model | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_run.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_run.md new file mode 100644 index 00000000000..b6190d26a03 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_run.md @@ -0,0 +1,61 @@ +# docker model run + +<!---MARKER_GEN_START--> +Run a model and interact with it using a submitted prompt or chat mode + +### Options + +| Name | Type | Default | Description | +|:-----------------|:---------|:--------|:-----------------------------------------------------| +| `--color` | `string` | `no` | Use colored output (auto\|yes\|no) | +| `--debug` | `bool` | | Enable debug logging | +| `-d`, `--detach` | `bool` | | Load the model in the background without interaction | +| `--openaiurl` | `string` | | OpenAI-compatible API endpoint URL to chat with | +| `--websearch` | `bool` | | Enable web search tool during chat | + + +<!---MARKER_GEN_END--> + +## Description + +When you run a model, Docker calls an inference server API endpoint hosted by the Model Runner through Docker Desktop. The model stays in memory until another model is requested, or until a pre-defined inactivity timeout is reached (currently 5 minutes). + +You do not have to use Docker model run before interacting with a specific model from a host process or from within a container. Model Runner transparently loads the requested model on-demand, assuming it has been pulled and is locally available. + +You can also use chat mode in the Docker Desktop Dashboard when you select the model in the **Models** tab. + +## Examples + +### One-time prompt + +```console +docker model run ai/smollm2 "Hi" +``` + +Output: + +```console +Hello! How can I assist you today? +``` + +### Interactive chat + +```console +docker model run ai/smollm2 +``` + +Output: + +```console +> Hi +Hi there! It's SmolLM, AI assistant. How can I help you today? +> /bye +``` + +### Pre-load a model + +```console +docker model run --detach ai/smollm2 +``` + +This loads the model into memory without interaction, ensuring maximum performance for subsequent requests. diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_search.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_search.md new file mode 100644 index 00000000000..b146e60c6d1 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_search.md @@ -0,0 +1,27 @@ +# docker model search + +<!---MARKER_GEN_START--> +Search for models from Docker Hub (ai/ namespace) and HuggingFace. + +When no search term is provided, lists all available models. +When a search term is provided, filters models by name/description. 
+ +Examples: + docker model search # List available models from Docker Hub + docker model search llama # Search for models containing "llama" + docker model search --source=all # Search both Docker Hub and HuggingFace + docker model search --source=huggingface # Only search HuggingFace + docker model search --limit=50 phi # Search with custom limit + docker model search --json llama # Output as JSON + +### Options + +| Name | Type | Default | Description | +|:----------------|:---------|:--------|:----------------------------------------------| +| `--json` | `bool` | | Output results as JSON | +| `-n`, `--limit` | `int` | `32` | Maximum number of results to show | +| `--source` | `string` | `all` | Source to search: all, dockerhub, huggingface | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_show.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_show.md new file mode 100644 index 00000000000..d8c37da522a --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_show.md @@ -0,0 +1,14 @@ +# docker model show + +<!---MARKER_GEN_START--> +Display detailed information about a model in a human-readable format. + +### Options + +| Name | Type | Default | Description | +|:-----------------|:-------|:--------|:----------------------------| +| `-r`, `--remote` | `bool` | | Show info for remote models | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_skills.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_skills.md new file mode 100644 index 00000000000..39ecc0ed721 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_skills.md @@ -0,0 +1,32 @@ +# docker model skills + +<!---MARKER_GEN_START--> +Install Docker Model Runner skills for AI coding assistants. + +Skills are configuration files that help AI coding assistants understand +how to use Docker Model Runner effectively for local model inference. 
+ +Supported targets: + --codex Install to ~/.codex/skills (OpenAI Codex CLI) + --claude Install to ~/.claude/skills (Claude Code) + --opencode Install to ~/.config/opencode/skills (OpenCode) + --dest Install to a custom directory + +Example: + docker model skills --claude + docker model skills --codex --claude + docker model skills --dest /path/to/skills + +### Options + +| Name | Type | Default | Description | +|:----------------|:---------|:--------|:--------------------------------------------------------| +| `--claude` | `bool` | | Install skills for Claude Code (~/.claude/skills) | +| `--codex` | `bool` | | Install skills for OpenAI Codex CLI (~/.codex/skills) | +| `--dest` | `string` | | Install skills to a custom directory | +| `-f`, `--force` | `bool` | | Overwrite existing skills without prompting | +| `--opencode` | `bool` | | Install skills for OpenCode (~/.config/opencode/skills) | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_start-runner.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_start-runner.md new file mode 100644 index 00000000000..24cf2fe12f3 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_start-runner.md @@ -0,0 +1,29 @@ +# docker model start-runner + +<!---MARKER_GEN_START--> +Start Docker Model Runner (Docker Engine only) + +### Options + +| Name | Type | Default | Description | +|:-----------------|:---------|:------------|:-------------------------------------------------------------------------------------------------------| +| `--backend` | `string` | | Specify backend (llama.cpp\|vllm\|diffusers). Default: llama.cpp | +| `--debug` | `bool` | | Enable debug logging | +| `--do-not-track` | `bool` | | Do not track models usage in Docker Model Runner | +| `--gpu` | `string` | `auto` | Specify GPU support (none\|auto\|cuda\|rocm\|musa\|cann) | +| `--host` | `string` | `127.0.0.1` | Host address to bind Docker Model Runner | +| `--port` | `uint16` | `0` | Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) | +| `--proxy-cert` | `string` | | Path to a CA certificate file for proxy SSL inspection | +| `--tls` | `bool` | | Enable TLS/HTTPS for Docker Model Runner API | +| `--tls-cert` | `string` | | Path to TLS certificate file (auto-generated if not provided) | +| `--tls-key` | `string` | | Path to TLS private key file (auto-generated if not provided) | +| `--tls-port` | `uint16` | `0` | TLS port for Docker Model Runner (default: 12444 for Docker Engine, 12445 for Cloud mode) | + + +<!---MARKER_GEN_END--> + +## Description + +This command starts the Docker Model Runner without pulling container images. Use this command to start the runner when you already have the required images locally. + +For the first-time setup or to ensure you have the latest images, use `docker model install-runner` instead. 
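As an illustration of the flags above, starting the runner on all interfaces with TLS enabled might look like this (the values are placeholders):

```console
$ docker model start-runner --host 0.0.0.0 --tls
```

If `--tls-cert` and `--tls-key` are not supplied, the options above note that a certificate is auto-generated.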
diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_status.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_status.md new file mode 100644 index 00000000000..baa630073db --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_status.md @@ -0,0 +1,17 @@ +# docker model status + +<!---MARKER_GEN_START--> +Check if the Docker Model Runner is running + +### Options + +| Name | Type | Default | Description | +|:---------|:-------|:--------|:----------------------| +| `--json` | `bool` | | Format output in JSON | + + +<!---MARKER_GEN_END--> + +## Description + +Check whether the Docker Model Runner is running and displays the current inference engine. diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_stop-runner.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_stop-runner.md new file mode 100644 index 00000000000..99a6b0cda60 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_stop-runner.md @@ -0,0 +1,19 @@ +# docker model stop-runner + +<!---MARKER_GEN_START--> +Stop Docker Model Runner (Docker Engine only) + +### Options + +| Name | Type | Default | Description | +|:-----------|:-------|:--------|:----------------------------| +| `--models` | `bool` | | Remove model storage volume | + + +<!---MARKER_GEN_END--> + +## Description + +This command stops the Docker Model Runner by removing the running containers, but preserves the container images on disk. Use this command when you want to temporarily stop the runner but plan to start it again later. + +To completely remove the runner including images, use `docker model uninstall-runner --images` instead. diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_tag.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_tag.md new file mode 100644 index 00000000000..3f1615e296f --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_tag.md @@ -0,0 +1,11 @@ +# docker model tag + +<!---MARKER_GEN_START--> +Tag a model + + +<!---MARKER_GEN_END--> + +## Description + +Specify a particular version or variant of the model. If no tag is provided, Docker defaults to `latest`. diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_uninstall-runner.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_uninstall-runner.md new file mode 100644 index 00000000000..8beb8744fd4 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_uninstall-runner.md @@ -0,0 +1,16 @@ +# docker model uninstall-runner + +<!---MARKER_GEN_START--> +Uninstall Docker Model Runner (Docker Engine only) + +### Options + +| Name | Type | Default | Description | +|:------------|:---------|:--------|:----------------------------------------------------| +| `--backend` | `string` | | Uninstall a deferred backend (e.g. 
vllm, diffusers) | +| `--images` | `bool` | | Remove docker/model-runner images | +| `--models` | `bool` | | Remove model storage volume | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_unload.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_unload.md new file mode 100644 index 00000000000..17060c64965 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_unload.md @@ -0,0 +1,19 @@ +# docker model unload + +<!---MARKER_GEN_START--> +Unload running models + +### Aliases + +`docker model unload`, `docker model stop` + +### Options + +| Name | Type | Default | Description | +|:------------|:---------|:--------|:---------------------------| +| `--all` | `bool` | | Unload all running models | +| `--backend` | `string` | | Optional backend to target | + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_version.md b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_version.md new file mode 100644 index 00000000000..eb32c61fd97 --- /dev/null +++ b/_vendor/github.com/docker/model-runner/cmd/cli/docs/reference/model_version.md @@ -0,0 +1,8 @@ +# docker model version + +<!---MARKER_GEN_START--> +Show the Docker Model Runner version + + +<!---MARKER_GEN_END--> + diff --git a/_vendor/github.com/docker/scout-cli/docs/docker_scout_attestation_add.yaml b/_vendor/github.com/docker/scout-cli/docs/docker_scout_attestation_add.yaml deleted file mode 100644 index f6850825358..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/docker_scout_attestation_add.yaml +++ /dev/null @@ -1,54 +0,0 @@ -command: docker scout attestation add -aliases: docker scout attestation add, docker scout attest add -short: Add attestation to image -long: The docker scout attestation add command adds attestations to images. -usage: docker scout attestation add OPTIONS IMAGE [IMAGE...] 
-pname: docker scout attestation -plink: docker_scout_attestation.yaml -options: - - option: file - value_type: stringSlice - default_value: '[]' - description: File location of attestations to attach - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: predicate-type - value_type: string - description: Predicate-type for attestations - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false -inherited_options: - - option: debug - value_type: bool - default_value: "false" - description: Debug messages - deprecated: false - hidden: true - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: verbose-debug - value_type: bool - default_value: "false" - description: Verbose debug - deprecated: false - hidden: true - experimental: false - experimentalcli: false - kubernetes: false - swarm: false -deprecated: false -experimental: false -experimentalcli: true -kubernetes: false -swarm: false - diff --git a/_vendor/github.com/docker/scout-cli/docs/docker_scout_docker-cli-plugin-hooks.yaml b/_vendor/github.com/docker/scout-cli/docs/docker_scout_docker-cli-plugin-hooks.yaml deleted file mode 100644 index a36ba75bb9b..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/docker_scout_docker-cli-plugin-hooks.yaml +++ /dev/null @@ -1,33 +0,0 @@ -command: docker scout docker-cli-plugin-hooks -short: runs the plugins hooks -long: runs the plugins hooks -usage: docker scout docker-cli-plugin-hooks -pname: docker scout -plink: docker_scout.yaml -inherited_options: - - option: debug - value_type: bool - default_value: "false" - description: Debug messages - deprecated: false - hidden: true - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: verbose-debug - value_type: bool - default_value: "false" - description: Verbose debug - deprecated: false - hidden: true - experimental: false - experimentalcli: false - kubernetes: false - swarm: false -deprecated: false -experimental: false -experimentalcli: false -kubernetes: false -swarm: false - diff --git a/_vendor/github.com/docker/scout-cli/docs/docker_scout_watch.yaml b/_vendor/github.com/docker/scout-cli/docs/docker_scout_watch.yaml deleted file mode 100644 index 30b6dbef471..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/docker_scout_watch.yaml +++ /dev/null @@ -1,161 +0,0 @@ -command: docker scout watch -short: | - Watch repositories in a registry and push images and indexes to Docker Scout (experimental) -long: |- - The `docker scout watch` command watches repositories in a registry - and pushes images or analysis results to Docker Scout. 
-usage: docker scout watch -pname: docker scout -plink: docker_scout.yaml -options: - - option: all-images - value_type: bool - default_value: "false" - description: | - Push all images instead of only the ones pushed during the watch command is running - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: dry-run - value_type: bool - default_value: "false" - description: Watch images and prepare them, but do not push them - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: interval - value_type: int64 - default_value: "60" - description: Interval in seconds between checks - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: org - value_type: string - description: Namespace of the Docker organization to which image will be pushed - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: refresh-registry - value_type: bool - default_value: "false" - description: | - Refresh the list of repositories of a registry at every run. Only with --registry. - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: registry - value_type: string - description: Registry to watch - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: repository - value_type: stringSlice - default_value: '[]' - description: Repository to watch - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: sbom - value_type: bool - default_value: "true" - description: Create and upload SBOMs - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: tag - value_type: stringSlice - default_value: '[]' - description: Regular expression to match tags to watch - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: workers - value_type: int - default_value: "3" - description: Number of concurrent workers - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false -inherited_options: - - option: debug - value_type: bool - default_value: "false" - description: Debug messages - deprecated: false - hidden: true - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: verbose-debug - value_type: bool - default_value: "false" - description: Verbose debug - deprecated: false - hidden: true - experimental: false - experimentalcli: false - kubernetes: false - swarm: false -examples: |- - ### Watch for new images from two repositories and push them - - ```console - $ docker scout watch --org my-org --repository registry-1.example.com/repo-1 --repository registry-2.example.com/repo-2 - ``` - - ### Only push images with a specific tag - - ```console - $ docker scout watch --org my-org --repository registry.example.com/my-service --tag latest - ``` - - ### Watch all repositories of a registry - - ```console - $ docker scout watch --org my-org --registry registry.example.com - ``` - - ### Push all images and not just the new ones - - ```console - $ docker scout watch--org my-org --repository 
registry.example.com/my-service --all-images - ``` -deprecated: false -experimental: false -experimentalcli: true -kubernetes: false -swarm: false - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout.md b/_vendor/github.com/docker/scout-cli/docs/scout.md deleted file mode 100644 index aeac72b4c6d..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout.md +++ /dev/null @@ -1,37 +0,0 @@ -# docker scout - -``` -docker scout COMMAND -``` - -<!---MARKER_GEN_START--> -Command line tool for Docker Scout - -### Subcommands - -| Name | Description | -|:--------------------------------------------------------------|:--------------------------------------------------------------------------------------------| -| [`attestation`](scout_attestation.md) | Manage attestations on image indexes | -| [`cache`](scout_cache.md) | Manage Docker Scout cache and temporary files | -| [`compare`](scout_compare.md) | Compare two images and display differences (experimental) | -| [`config`](scout_config.md) | Manage Docker Scout configuration | -| [`cves`](scout_cves.md) | Display CVEs identified in a software artifact | -| [`docker-cli-plugin-hooks`](scout_docker-cli-plugin-hooks.md) | runs the plugins hooks | -| [`enroll`](scout_enroll.md) | Enroll an organization with Docker Scout | -| [`environment`](scout_environment.md) | Manage environments (experimental) | -| [`help`](scout_help.md) | Display information about the available commands | -| [`integration`](scout_integration.md) | Commands to list, configure, and delete Docker Scout integrations | -| [`policy`](scout_policy.md) | Evaluate policies against an image and display the policy evaluation results (experimental) | -| [`push`](scout_push.md) | Push an image or image index to Docker Scout | -| [`quickview`](scout_quickview.md) | Quick overview of an image | -| [`recommendations`](scout_recommendations.md) | Display available base image updates and remediation recommendations | -| [`repo`](scout_repo.md) | Commands to list, enable, and disable Docker Scout on repositories | -| [`sbom`](scout_sbom.md) | Generate or display SBOM of an image | -| [`stream`](scout_stream.md) | Manage streams (experimental) | -| [`version`](scout_version.md) | Show Docker Scout version information | -| [`watch`](scout_watch.md) | Watch repositories in a registry and push images and indexes to Docker Scout (experimental) | - - - -<!---MARKER_GEN_END--> - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_attestation.md b/_vendor/github.com/docker/scout-cli/docs/scout_attestation.md deleted file mode 100644 index d4f6bc58277..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_attestation.md +++ /dev/null @@ -1,19 +0,0 @@ -# docker scout attestation - -<!---MARKER_GEN_START--> -Manage attestations on image indexes - -### Aliases - -`docker scout attestation`, `docker scout attest` - -### Subcommands - -| Name | Description | -|:----------------------------------|:-------------------------| -| [`add`](scout_attestation_add.md) | Add attestation to image | - - - -<!---MARKER_GEN_END--> - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_attestation_add.md b/_vendor/github.com/docker/scout-cli/docs/scout_attestation_add.md deleted file mode 100644 index 5f09c0fffda..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_attestation_add.md +++ /dev/null @@ -1,19 +0,0 @@ -# docker scout attestation add - -<!---MARKER_GEN_START--> -Add attestation to image - -### Aliases - -`docker scout attestation add`, `docker scout attest 
add` - -### Options - -| Name | Type | Default | Description | -|:-------------------|:--------------|:--------|:----------------------------------------| -| `--file` | `stringSlice` | | File location of attestations to attach | -| `--predicate-type` | `string` | | Predicate-type for attestations | - - -<!---MARKER_GEN_END--> - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_cache.md b/_vendor/github.com/docker/scout-cli/docs/scout_cache.md deleted file mode 100644 index 9bb212dd3db..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_cache.md +++ /dev/null @@ -1,16 +0,0 @@ -# docker scout cache - -<!---MARKER_GEN_START--> -Manage Docker Scout cache and temporary files - -### Subcommands - -| Name | Description | -|:--------------------------------|:--------------------------------| -| [`df`](scout_cache_df.md) | Show Docker Scout disk usage | -| [`prune`](scout_cache_prune.md) | Remove temporary or cached data | - - - -<!---MARKER_GEN_END--> - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_cache_df.md b/_vendor/github.com/docker/scout-cli/docs/scout_cache_df.md deleted file mode 100644 index 71dcf99560b..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_cache_df.md +++ /dev/null @@ -1,52 +0,0 @@ -# docker scout cache df - -<!---MARKER_GEN_START--> -Show Docker Scout disk usage - - -<!---MARKER_GEN_END--> - -## Description - -Docker Scout uses a temporary cache storage for generating image SBOMs. -The cache helps avoid regenerating or fetching resources unnecessarily. - -This `docker scout cache df` command shows the cached data on the host. -Each cache entry is identified by the digest of the image. - -You can use the `docker scout cache prune` command to delete cache data at any time. - -## Examples - -### List temporary and cache files - -```console -$ docker scout cache df -Docker Scout temporary directory to generate SBOMs is located at: - /var/folders/dw/d6h9w2sx6rv3lzwwgrnx7t5h0000gp/T/docker-scout - this path can be configured using the DOCKER_SCOUT_CACHE_DIR environment variable - - Image Digest │ Size -──────────────────────────────────────────────────────────────────────────┼──────── - sha256:c41ab5c992deb4fe7e5da09f67a8804a46bd0592bfdf0b1847dde0e0889d2bff │ 21 kB - -Total: 21 kB - - -Docker Scout cached SBOMs are located at: - /Users/user/.docker/scout/sbom - - Image Digest │ Size of SBOM -──────────────────────────────────────────────────────────────────────────┼─────────────── - sha256:02bb6f428431fbc2809c5d1b41eab5a68350194fb508869a33cb1af4444c9b11 │ 42 kB - sha256:03fc002fe4f370463a8f04d3a288cdffa861e462fc8b5be44ab62b296ad95183 │ 100 kB - sha256:088134dd33e4a2997480a1488a41c11abebda465da5cf7f305a0ecf8ed494329 │ 194 kB - sha256:0b80b2f17aff7ee5bfb135c69d0d6fe34070e89042b7aac73d1abcc79cfe6759 │ 852 kB - sha256:0c9e8abe31a5f17d84d5c85d3853d2f948a4f126421e89e68753591f1b6fedc5 │ 930 kB - sha256:0d49cae0723c8d310e413736b5e91e0c59b605ade2546f6e6ef8f1f3ddc76066 │ 510 kB - sha256:0ef04748d071c2e631bb3edce8f805cb5512e746b682c83fdae6d8c0b243280b │ 1.0 MB - sha256:13fd22925b638bb7d2131914bb8f8b0f5f582bee364aec682d9e7fe722bb486a │ 42 kB - sha256:174c41d4fbc7f63e1f2bb7d2f7837318050406f2f27e5073a84a84f18b48b883 │ 115 kB - -Total: 4 MB -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_cache_prune.md b/_vendor/github.com/docker/scout-cli/docs/scout_cache_prune.md deleted file mode 100644 index 7292884c7dc..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_cache_prune.md +++ /dev/null @@ -1,40 +0,0 @@ -# 
docker scout cache prune - -<!---MARKER_GEN_START--> -Remove temporary or cached data - -### Options - -| Name | Type | Default | Description | -|:----------------|:-----|:--------|:-------------------------------| -| `-f`, `--force` | | | Do not prompt for confirmation | -| `--sboms` | | | Prune cached SBOMs | - - -<!---MARKER_GEN_END--> - -## Description - -The `docker scout cache prune` command removes temporary data and SBOM cache. - -By default, `docker scout cache prune` only deletes temporary data. -To delete temporary data and clear the SBOM cache, use the `--sboms` flag. - -## Examples - -### Delete temporary data - -```console -$ docker scout cache prune -? Are you sure to delete all temporary data? Yes - ✓ temporary data deleted -``` - -### Delete temporary _and_ cache data - -```console -$ docker scout cache prune --sboms -? Are you sure to delete all temporary data and all cached SBOMs? Yes - ✓ temporary data deleted - ✓ cached SBOMs deleted -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_compare.md b/_vendor/github.com/docker/scout-cli/docs/scout_compare.md deleted file mode 100644 index f25aa863550..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_compare.md +++ /dev/null @@ -1,110 +0,0 @@ -# docker scout compare - -<!---MARKER_GEN_START--> -Compare two images and display differences (experimental) - -### Aliases - -`docker scout compare`, `docker scout diff` - -### Options - -| Name | Type | Default | Description | -|:----------------------|:--------------|:--------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `-x`, `--exit-on` | `stringSlice` | | Comma separated list of conditions to fail the action step if worse, options are: vulnerability, policy | -| `--format` | `string` | `text` | Output format of the generated vulnerability report:<br>- text: default output, plain text with or without colors depending on the terminal<br>- markdown: Markdown output<br> | -| `--hide-policies` | | | Hide policy status from the output | -| `--ignore-base` | | | Filter out CVEs introduced from base image | -| `--ignore-unchanged` | | | Filter out unchanged packages | -| `--multi-stage` | | | Show packages from multi-stage Docker builds | -| `--only-fixed` | | | Filter to fixable CVEs | -| `--only-package-type` | `stringSlice` | | Comma separated list of package types (like apk, deb, rpm, npm, pypi, golang, etc) | -| `--only-policy` | `stringSlice` | | Comma separated list of policies to evaluate | -| `--only-severity` | `stringSlice` | | Comma separated list of severities (critical, high, medium, low, unspecified) to filter CVEs by | -| `--only-stage` | `stringSlice` | | Comma separated list of multi-stage Docker build stage names | -| `--only-unfixed` | | | Filter to unfixed CVEs | -| `--org` | `string` | | Namespace of the Docker organization | -| `-o`, `--output` | `string` | | Write the report to a file | -| `--platform` | `string` | | Platform of image to analyze | -| `--ref` | `string` | | Reference to use if the provided tarball contains multiple references.<br>Can only be used with archive | -| `--to` | `string` | | Image, directory, or archive to compare to | -| `--to-env` | `string` | | Name of environment to compare to | -| `--to-latest` | | | Latest image processed to compare to | -| `--to-ref` | `string` | | Reference to use if the provided tarball contains multiple references.<br>Can only be used with 
archive. | - - -<!---MARKER_GEN_END--> - -## Description - -The `docker scout compare` command analyzes two images and displays a comparison. - -> This command is **experimental** and its behaviour might change in the future - -The intended use of this command is to compare two versions of the same image. -For instance, when a new image is built and compared to the version running in production. - -If no image is specified, the most recently built image is used -as a comparison target. - -The following artifact types are supported: - -- Images -- OCI layout directories -- Tarball archives, as created by `docker save` -- Local directory or file - -By default, the tool expects an image reference, such as: - -- `redis` -- `curlimages/curl:7.87.0` -- `mcr.microsoft.com/dotnet/runtime:7.0` - -If the artifact you want to analyze is an OCI directory, a tarball archive, a local file or directory, -or if you want to control from where the image will be resolved, you must prefix the reference with one of the following: - -- `image://` (default) use a local image, or fall back to a registry lookup -- `local://` use an image from the local image store (don't do a registry lookup) -- `registry://` use an image from a registry (don't use a local image) -- `oci-dir://` use an OCI layout directory -- `archive://` use a tarball archive, as created by `docker save` -- `fs://` use a local directory or file -- `sbom://` SPDX file or in-toto attestation file with SPDX predicate or `syft` json SBOM file - -## Examples - -### Compare the most recently built image to the latest tag - -```console -$ docker scout compare --to namespace/repo:latest -``` - -### Compare local build to the same tag from the registry - -```console -$ docker scout compare local://namespace/repo:latest --to registry://namespace/repo:latest -``` - -### Ignore base images - -```console -$ docker scout compare --ignore-base --to namespace/repo:latest namespace/repo:v1.2.3-pre -``` - -### Generate a markdown output - -```console -$ docker scout compare --format markdown --to namespace/repo:latest namespace/repo:v1.2.3-pre -``` - -### Only compare maven packages and only display critical vulnerabilities for maven packages - -```console -$ docker scout compare --only-package-type maven --only-severity critical --to namespace/repo:latest namespace/repo:v1.2.3-pre -``` - -### Show all policy results for both images - -```console -docker scout compare --to namespace/repo:latest namespace/repo:v1.2.3-pre -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_config.md b/_vendor/github.com/docker/scout-cli/docs/scout_config.md deleted file mode 100644 index 1a6e8b69c9a..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_config.md +++ /dev/null @@ -1,38 +0,0 @@ -# docker scout config - -<!---MARKER_GEN_START--> -Manage Docker Scout configuration - - -<!---MARKER_GEN_END--> - -## Description - -`docker scout config` allows you to list, get and set Docker Scout configuration. - -Available configuration key: - -- `organization`: Namespace of the Docker organization to be used by default. 
- -## Examples - -### List existing configuration - -```console -$ docker scout config -organization=my-org-namespace -``` - -### Print configuration value - -```console -$ docker scout config organization -my-org-namespace -``` - -### Set configuration value - -```console -$ docker scout config organization my-org-namespace - ✓ Successfully set organization to my-org-namespace -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_cves.md b/_vendor/github.com/docker/scout-cli/docs/scout_cves.md deleted file mode 100644 index bdb7f82921d..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_cves.md +++ /dev/null @@ -1,271 +0,0 @@ -# docker scout cves - -``` -docker scout cves [OPTIONS] [IMAGE|DIRECTORY|ARCHIVE] -``` - -<!---MARKER_GEN_START--> -Display CVEs identified in a software artifact - -### Options - -| Name | Type | Default | Description | -|:-----------------------|:--------------|:-----------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `--details` | | | Print details on default text output | -| `--env` | `string` | | Name of environment | -| [`--epss`](#epss) | | | Display the EPSS scores and organize the package's CVEs according to their EPSS score | -| `--epss-percentile` | `float32` | `0` | Exclude CVEs with EPSS scores less than the specified percentile (0 to 1) | -| `--epss-score` | `float32` | `0` | Exclude CVEs with EPSS scores less than the specified value (0 to 1) | -| `-e`, `--exit-code` | | | Return exit code '2' if vulnerabilities are detected | -| `--format` | `string` | `packages` | Output format of the generated vulnerability report:<br>- packages: default output, plain text with vulnerabilities grouped by packages<br>- sarif: json Sarif output<br>- spdx: json SPDX output<br>- gitlab: json GitLab output<br>- markdown: markdown output (including some html tags like collapsible sections)<br>- sbom: json SBOM output<br> | -| `--ignore-base` | | | Filter out CVEs introduced from base image | -| `--ignore-suppressed` | | | Filter CVEs found in Scout exceptions based on the specified exception scope | -| `--locations` | | | Print package locations including file paths and layer diff_id | -| `--multi-stage` | | | Show packages from multi-stage Docker builds | -| `--only-base` | | | Only show CVEs introduced by the base image | -| `--only-cisa-kev` | | | Filter to CVEs listed in the CISA KEV catalog | -| `--only-cve-id` | `stringSlice` | | Comma separated list of CVE ids (like CVE-2021-45105) to search for | -| `--only-fixed` | | | Filter to fixable CVEs | -| `--only-metric` | `stringSlice` | | Comma separated list of CVSS metrics (like AV:N or PR:L) to filter CVEs by | -| `--only-package` | `stringSlice` | | Comma separated regular expressions to filter packages by | -| `--only-package-type` | `stringSlice` | | Comma separated list of package types (like apk, deb, rpm, npm, pypi, golang, etc) | -| `--only-severity` | `stringSlice` | | Comma separated list of severities (critical, high, medium, low, unspecified) to filter CVEs by | -| `--only-stage` | `stringSlice` | | Comma separated list of multi-stage Docker build stage names | -| `--only-unfixed` | | | Filter to unfixed CVEs | -| `--only-vex-affected` | | | Filter CVEs by VEX statements with status 
not affected | -| `--only-vuln-packages` | | | When used with --format=only-packages ignore packages with no vulnerabilities | -| `--org` | `string` | | Namespace of the Docker organization | -| `-o`, `--output` | `string` | | Write the report to a file | -| `--platform` | `string` | | Platform of image to analyze | -| `--ref` | `string` | | Reference to use if the provided tarball contains multiple references.<br>Can only be used with archive | -| `--vex-author` | `stringSlice` | | List of VEX statement authors to accept | -| `--vex-location` | `stringSlice` | | File location of directory or file containing VEX statements | - - -<!---MARKER_GEN_END--> - -## Description - -The `docker scout cves` command analyzes a software artifact for vulnerabilities. - -If no image is specified, the most recently built image is used. - -The following artifact types are supported: - -- Images -- OCI layout directories -- Tarball archives, as created by `docker save` -- Local directory or file - -By default, the tool expects an image reference, such as: - -- `redis` -- `curlimages/curl:7.87.0` -- `mcr.microsoft.com/dotnet/runtime:7.0` - -If the artifact you want to analyze is an OCI directory, a tarball archive, a local file or directory, -or if you want to control from where the image will be resolved, you must prefix the reference with one of the following: - -- `image://` (default) use a local image, or fall back to a registry lookup -- `local://` use an image from the local image store (don't do a registry lookup) -- `registry://` use an image from a registry (don't use a local image) -- `oci-dir://` use an OCI layout directory -- `archive://` use a tarball archive, as created by `docker save` -- `fs://` use a local directory or file -- `sbom://` SPDX file or in-toto attestation file with SPDX predicate or `syft` json SBOM file - In case of `sbom://` prefix, if the file is not defined then it will try to read it from the standard input. - -## Examples - -### Display vulnerabilities grouped by package - -```console -$ docker scout cves alpine -Analyzing image alpine -✓ Image stored for indexing -✓ Indexed 18 packages -✓ No vulnerable package detected -``` - -### Display vulnerabilities from a `docker save` tarball - -```console -$ docker save alpine > alpine.tar - -$ docker scout cves archive://alpine.tar -Analyzing archive alpine.tar -✓ Archive read -✓ SBOM of image already cached, 18 packages indexed -✓ No vulnerable package detected -``` - -### Display vulnerabilities from an OCI directory - -```console -$ skopeo copy --override-os linux docker://alpine oci:alpine - -$ docker scout cves oci-dir://alpine -Analyzing OCI directory alpine -✓ OCI directory read -✓ Image stored for indexing -✓ Indexed 19 packages -✓ No vulnerable package detected -``` - -### Display vulnerabilities from the current directory - -```console -$ docker scout cves fs://. -``` - -### Export vulnerabilities to a SARIF JSON file - -```console -$ docker scout cves --format sarif --output alpine.sarif.json alpine -Analyzing image alpine -✓ SBOM of image already cached, 18 packages indexed -✓ No vulnerable package detected -✓ Report written to alpine.sarif.json -``` - -### Display markdown output - -The following example shows how to generate the vulnerability report as markdown. 
- -```console -$ docker scout cves --format markdown alpine -✓ Pulled -✓ SBOM of image already cached, 19 packages indexed -✗ Detected 1 vulnerable package with 3 vulnerabilities -<h2>:mag: Vulnerabilities of <code>alpine</code></h2> - -<details open="true"><summary>:package: Image Reference</strong> <code>alpine</code></summary> -<table> -<tr><td>digest</td><td><code>sha256:e3bd82196e98898cae9fe7fbfd6e2436530485974dc4fb3b7ddb69134eda2407</code></td><tr><tr><td>vulnerabilities</td><td><img alt="critical: 0" src="https://img.shields.io/badge/critical-0-lightgrey"/> <img alt="high: 0" src="https://img.shields.io/badge/high-0-lightgrey"/> <img alt="medium: 2" src="https://img.shields.io/badge/medium-2-fbb552"/> <img alt="low: 0" src="https://img.shields.io/badge/low-0-lightgrey"/> <img alt="unspecified: 1" src="https://img.shields.io/badge/unspecified-1-lightgrey"/></td></tr> -<tr><td>platform</td><td>linux/arm64</td></tr> -<tr><td>size</td><td>3.3 MB</td></tr> -<tr><td>packages</td><td>19</td></tr> -</table> -</details></table> -</details> -... -``` - -### List all vulnerable packages of a certain type - -The following example shows how to generate a list of packages, only including -packages of the specified type, and only showing packages that are vulnerable. - -```console -$ docker scout cves --format only-packages --only-package-type golang --only-vuln-packages golang:1.18.0 -✓ Pulled -✓ SBOM of image already cached, 296 packages indexed -✗ Detected 1 vulnerable package with 40 vulnerabilities - -Name Version Type Vulnerabilities -─────────────────────────────────────────────────────────── -stdlib 1.18 golang 2C 29H 8M 1L -``` - -### <a name="epss"></a> Display EPSS score (--epss) - -The `--epss` flag adds [Exploit Prediction Scoring System (EPSS)](https://www.first.org/epss/) -scores to the `docker scout cves` output. EPSS scores are estimates of the likelihood (probability) -that a software vulnerability will be exploited in the wild in the next 30 days. -The higher the score, the greater the probability that a vulnerability will be exploited. - -```console {hl_lines="13,14"} -$ docker scout cves --epss nginx - ✓ Provenance obtained from attestation - ✓ SBOM obtained from attestation, 232 packages indexed - ✓ Pulled - ✗ Detected 23 vulnerable packages with a total of 39 vulnerabilities - -... - - ✗ HIGH CVE-2023-52425 - https://scout.docker.com/v/CVE-2023-52425 - Affected range : >=2.5.0-1 - Fixed version : not fixed - EPSS Score : 0.000510 - EPSS Percentile : 0.173680 -``` - -- `EPSS Score` is a floating point number between 0 and 1 representing the probability of exploitation in the wild in the next 30 days (following score publication). -- `EPSS Percentile` is the percentile of the current score, the proportion of all scored vulnerabilities with the same or a lower EPSS score. - -You can use the `--epss-score` and `--epss-percentile` flags to filter the output -of `docker scout cves` based on these scores. For example, -to only show vulnerabilities with an EPSS score higher than 0.5: - -```console -$ docker scout cves --epss --epss-score 0.5 nginx - ✓ SBOM of image already cached, 232 packages indexed - ✓ EPSS scores for 2024-03-01 already cached - ✗ Detected 1 vulnerable package with 1 vulnerability - -... - - ✗ LOW CVE-2023-44487 - https://scout.docker.com/v/CVE-2023-44487 - Affected range : >=1.22.1-9 - Fixed version : not fixed - EPSS Score : 0.705850 - EPSS Percentile : 0.979410 -``` - -EPSS scores are updated on a daily basis. 
-By default, the latest available score is displayed. -You can use the `--epss-date` flag to manually specify a date -in the format `yyyy-mm-dd` for fetching EPSS scores. - -```console -$ docker scout cves --epss --epss-date 2024-01-02 nginx -``` - -### List vulnerabilities from an SPDX file - -The following example shows how to generate a list of vulnerabilities from an SPDX file using `syft`. - -```console -$ syft -o spdx-json alpine:3.16.1 | docker scout cves sbom:// - ✔ Pulled image - ✔ Loaded image alpine:3.16.1 - ✔ Parsed image sha256:3d81c46cd8756ddb6db9ec36fa06a6fb71c287fb265232ba516739dc67a5f07d - ✔ Cataloged contents 274a317d88b54f9e67799244a1250cad3fe7080f45249fa9167d1f871218d35f - ├── ✔ Packages [14 packages] - ├── ✔ File digests [75 files] - ├── ✔ File metadata [75 locations] - └── ✔ Executables [16 executables] - ✗ Detected 2 vulnerable packages with a total of 11 vulnerabilities - - -## Overview - - │ Analyzed SBOM -────────────────────┼────────────────────────────── - Target │ <stdin> - digest │ 274a317d88b5 - platform │ linux/arm64 - vulnerabilities │ 1C 2H 8M 0L - packages │ 15 - - -## Packages and Vulnerabilities - - 1C 0H 0M 0L zlib 1.2.12-r1 -pkg:apk/alpine/zlib@1.2.12-r1?arch=aarch64&distro=alpine-3.16.1 - - ✗ CRITICAL CVE-2022-37434 - https://scout.docker.com/v/CVE-2022-37434 - Affected range : <1.2.12-r2 - Fixed version : 1.2.12-r2 - - ... - -11 vulnerabilities found in 2 packages - CRITICAL 1 - HIGH 2 - MEDIUM 8 - LOW 0 -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_docker-cli-plugin-hooks.md b/_vendor/github.com/docker/scout-cli/docs/scout_docker-cli-plugin-hooks.md deleted file mode 100644 index 8fbcd042036..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_docker-cli-plugin-hooks.md +++ /dev/null @@ -1,8 +0,0 @@ -# docker scout docker-cli-plugin-hooks - -<!---MARKER_GEN_START--> -runs the plugins hooks - - -<!---MARKER_GEN_END--> - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_enroll.md b/_vendor/github.com/docker/scout-cli/docs/scout_enroll.md deleted file mode 100644 index b60fd3471f6..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_enroll.md +++ /dev/null @@ -1,11 +0,0 @@ -# docker scout enroll - -<!---MARKER_GEN_START--> -Enroll an organization with Docker Scout - - -<!---MARKER_GEN_END--> - -## Description - -The `docker scout enroll` command enrolls an organization with Docker Scout. diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_environment.md b/_vendor/github.com/docker/scout-cli/docs/scout_environment.md deleted file mode 100644 index 4f019ff35ce..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_environment.md +++ /dev/null @@ -1,58 +0,0 @@ -# docker scout environment - -<!---MARKER_GEN_START--> -Manage environments (experimental) - -### Aliases - -`docker scout environment`, `docker scout env` - -### Options - -| Name | Type | Default | Description | -|:-----------------|:---------|:--------|:-------------------------------------| -| `--org` | `string` | | Namespace of the Docker organization | -| `-o`, `--output` | `string` | | Write the report to a file | -| `--platform` | `string` | | Platform of image to record | - - -<!---MARKER_GEN_END--> - -## Description - -The `docker scout environment` command lists the environments. -If you pass an image reference, the image is recorded to the specified environment. - -Once recorded, environments can be referred to by their name. 
For example, -you can refer to the `production` environment with the `docker scout compare` -command as follows: - -```console -$ docker scout compare --to-env production -``` - -## Examples - -### List existing environments - -```console -$ docker scout environment -prod -staging -``` - -### List images of an environment - -```console -$ docker scout environment staging -namespace/repo:tag@sha256:9a4df4fadc9bbd44c345e473e0688c2066a6583d4741679494ba9228cfd93e1b -namespace/other-repo:tag@sha256:0001d6ce124855b0a158569c584162097fe0ca8d72519067c2c8e3ce407c580f -``` - -### Record an image to an environment, for a specific platform - -```console -$ docker scout environment staging namespace/repo:stage-latest --platform linux/amd64 -✓ Pulled -✓ Successfully recorded namespace/repo:stage-latest in environment staging -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_help.md b/_vendor/github.com/docker/scout-cli/docs/scout_help.md deleted file mode 100644 index ec152c6aaf9..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_help.md +++ /dev/null @@ -1,8 +0,0 @@ -# docker scout help - -<!---MARKER_GEN_START--> -Display information about the available commands - - -<!---MARKER_GEN_END--> - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_integration.md b/_vendor/github.com/docker/scout-cli/docs/scout_integration.md deleted file mode 100644 index 9a2def3a0b8..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_integration.md +++ /dev/null @@ -1,17 +0,0 @@ -# docker scout integration - -<!---MARKER_GEN_START--> -Commands to list, configure, and delete Docker Scout integrations - -### Subcommands - -| Name | Description | -|:----------------------------------------------|:----------------------------------------------------| -| [`configure`](scout_integration_configure.md) | Configure or update a new integration configuration | -| [`delete`](scout_integration_delete.md) | Delete a new integration configuration | -| [`list`](scout_integration_list.md) | Integration Docker Scout | - - - -<!---MARKER_GEN_END--> - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_integration_configure.md b/_vendor/github.com/docker/scout-cli/docs/scout_integration_configure.md deleted file mode 100644 index 521193ae3bc..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_integration_configure.md +++ /dev/null @@ -1,16 +0,0 @@ -# docker scout integration configure - -<!---MARKER_GEN_START--> -Configure or update a new integration configuration - -### Options - -| Name | Type | Default | Description | -|:--------------|:--------------|:--------|:-------------------------------------------------------------| -| `--name` | `string` | | Name of integration configuration to create | -| `--org` | `string` | | Namespace of the Docker organization | -| `--parameter` | `stringSlice` | | Integration parameters in the form of --parameter NAME=VALUE | - - -<!---MARKER_GEN_END--> - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_integration_delete.md b/_vendor/github.com/docker/scout-cli/docs/scout_integration_delete.md deleted file mode 100644 index 0a68c8adcaf..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_integration_delete.md +++ /dev/null @@ -1,15 +0,0 @@ -# docker scout integration delete - -<!---MARKER_GEN_START--> -Delete a new integration configuration - -### Options - -| Name | Type | Default | Description | -|:---------|:---------|:--------|:--------------------------------------------| -| `--name` | `string` | | Name of 
integration configuration to delete | -| `--org` | `string` | | Namespace of the Docker organization | - - -<!---MARKER_GEN_END--> - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_integration_list.md b/_vendor/github.com/docker/scout-cli/docs/scout_integration_list.md deleted file mode 100644 index 67b39c59fc5..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_integration_list.md +++ /dev/null @@ -1,15 +0,0 @@ -# docker scout integration list - -<!---MARKER_GEN_START--> -Integration Docker Scout - -### Options - -| Name | Type | Default | Description | -|:---------|:---------|:--------|:------------------------------------------| -| `--name` | `string` | | Name of integration configuration to list | -| `--org` | `string` | | Namespace of the Docker organization | - - -<!---MARKER_GEN_END--> - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_policy.md b/_vendor/github.com/docker/scout-cli/docs/scout_policy.md deleted file mode 100644 index 46735c018d7..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_policy.md +++ /dev/null @@ -1,52 +0,0 @@ -# docker scout policy - -<!---MARKER_GEN_START--> -Evaluate policies against an image and display the policy evaluation results (experimental) - -### Options - -| Name | Type | Default | Description | -|:--------------------|:--------------|:--------|:------------------------------------------------------------| -| `-e`, `--exit-code` | | | Return exit code '2' if policies are not met, '0' otherwise | -| `--only-policy` | `stringSlice` | | Comma separated list of policies to evaluate | -| `--org` | `string` | | Namespace of the Docker organization | -| `-o`, `--output` | `string` | | Write the report to a file | -| `--platform` | `string` | | Platform of image to pull policy results from | -| `--to-env` | `string` | | Name of the environment to compare to | -| `--to-latest` | | | Latest image processed to compare to | - - -<!---MARKER_GEN_END--> - -## Description - -The `docker scout policy` command evaluates policies against an image. -The image analysis is uploaded to Docker Scout where policies get evaluated. - -The policy evaluation results may take a few minutes to become available. 
- -## Examples - -### Evaluate policies against an image and display the results - -```console -$ docker scout policy dockerscoutpolicy/customers-api-service:0.0.1 -``` - -### Evaluate policies against an image for a specific organization - -```console -$ docker scout policy dockerscoutpolicy/customers-api-service:0.0.1 --org dockerscoutpolicy -``` - -### Evaluate policies against an image with a specific platform - -```console -$ docker scout policy dockerscoutpolicy/customers-api-service:0.0.1 --platform linux/amd64 -``` - -### Compare policy results for a repository in a specific environment - -```console -$ docker scout policy dockerscoutpolicy/customers-api-service --to-env production -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_push.md b/_vendor/github.com/docker/scout-cli/docs/scout_push.md deleted file mode 100644 index 09e3397e5c7..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_push.md +++ /dev/null @@ -1,31 +0,0 @@ -# docker scout push - -<!---MARKER_GEN_START--> -Push an image or image index to Docker Scout - -### Options - -| Name | Type | Default | Description | -|:-----------------|:---------|:--------|:-------------------------------------------------------------------| -| `--author` | `string` | | Name of the author of the image | -| `--dry-run` | | | Do not push the image but process it | -| `--org` | `string` | | Namespace of the Docker organization to which image will be pushed | -| `-o`, `--output` | `string` | | Write the report to a file | -| `--platform` | `string` | | Platform of image to be pushed | -| `--sbom` | | | Create and upload SBOMs | -| `--timestamp` | `string` | | Timestamp of image or tag creation | - - -<!---MARKER_GEN_END--> - -## Description - -The `docker scout push` command lets you push an image or analysis result to Docker Scout. 
- -## Examples - -### Push an image to Docker Scout - -```console -$ docker scout push --org my-org registry.example.com/repo:tag -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_quickview.md b/_vendor/github.com/docker/scout-cli/docs/scout_quickview.md deleted file mode 100644 index 3bf752a0cf9..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_quickview.md +++ /dev/null @@ -1,101 +0,0 @@ -# docker scout quickview - -<!---MARKER_GEN_START--> -Quick overview of an image - -### Aliases - -`docker scout quickview`, `docker scout qv` - -### Options - -| Name | Type | Default | Description | -|:----------------------|:--------------|:--------|:--------------------------------------------------------------------------------------------------------| -| `--env` | `string` | | Name of the environment | -| `--ignore-suppressed` | | | Filter CVEs found in Scout exceptions based on the specified exception scope | -| `--latest` | | | Latest indexed image | -| `--only-policy` | `stringSlice` | | Comma separated list of policies to evaluate | -| `--only-vex-affected` | | | Filter CVEs by VEX statements with status not affected | -| `--org` | `string` | | Namespace of the Docker organization | -| `-o`, `--output` | `string` | | Write the report to a file | -| `--platform` | `string` | | Platform of image to analyze | -| `--ref` | `string` | | Reference to use if the provided tarball contains multiple references.<br>Can only be used with archive | -| `--vex-author` | `stringSlice` | | List of VEX statement authors to accept | -| `--vex-location` | `stringSlice` | | File location of directory or file containing VEX statements | - - -<!---MARKER_GEN_END--> - -## Description - -The `docker scout quickview` command displays a quick overview of an image. -It displays a summary of the vulnerabilities in the specified image -and vulnerabilities from the base image. -If available, it also displays base image refresh and update recommendations. - -If no image is specified, the most recently built image is used. - -The following artifact types are supported: - -- Images -- OCI layout directories -- Tarball archives, as created by `docker save` -- Local directory or file - -By default, the tool expects an image reference, such as: - -- `redis` -- `curlimages/curl:7.87.0` -- `mcr.microsoft.com/dotnet/runtime:7.0` - -If the artifact you want to analyze is an OCI directory, a tarball archive, a local file or directory, -or if you want to control from where the image will be resolved, you must prefix the reference with one of the following: - -- `image://` (default) use a local image, or fall back to a registry lookup -- `local://` use an image from the local image store (don't do a registry lookup) -- `registry://` use an image from a registry (don't use a local image) -- `oci-dir://` use an OCI layout directory -- `archive://` use a tarball archive, as created by `docker save` -- `fs://` use a local directory or file -- `sbom://` SPDX file or in-toto attestation file with SPDX predicate or `syft` json SBOM file - In case of `sbom://` prefix, if the file is not defined then it will try to read it from the standard input. - -## Examples - -### Quick overview of an image - -```console -$ docker scout quickview golang:1.19.4 - ...Pulling - ✓ Pulled - ✓ SBOM of image already cached, 278 packages indexed - - Your image golang:1.19.4 │ 5C 3H 6M 63L - Base image buildpack-deps:bullseye-scm │ 5C 1H 3M 48L 6? 
- Refreshed base image buildpack-deps:bullseye-scm │ 0C 0H 0M 42L - │ -5 -1 -3 -6 -6 - Updated base image buildpack-deps:sid-scm │ 0C 0H 1M 29L - │ -5 -1 -2 -19 -6 -``` - -### Quick overview of the most recently built image - -```console -$ docker scout qv -``` - -### Quick overview from an SPDX file - -```console -$ syft -o spdx-json alpine:3.16.1 | docker scout quickview sbom:// - ✔ Loaded image alpine:3.16.1 - ✔ Parsed image sha256:3d81c46cd8756ddb6db9ec36fa06a6fb71c287fb265232ba516739dc67a5f07d - ✔ Cataloged contents 274a317d88b54f9e67799244a1250cad3fe7080f45249fa9167d1f871218d35f - ├── ✔ Packages [14 packages] - ├── ✔ File digests [75 files] - ├── ✔ File metadata [75 locations] - └── ✔ Executables [16 executables] - - Target │ <stdin> │ 1C 2H 8M 0L - digest │ 274a317d88b5 │ -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_recommendations.md b/_vendor/github.com/docker/scout-cli/docs/scout_recommendations.md deleted file mode 100644 index f1ccdf64fee..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_recommendations.md +++ /dev/null @@ -1,71 +0,0 @@ -# docker scout recommendations - -<!---MARKER_GEN_START--> -Display available base image updates and remediation recommendations - -### Options - -| Name | Type | Default | Description | -|:-----------------|:---------|:--------|:--------------------------------------------------------------------------------------------------------| -| `--only-refresh` | | | Only display base image refresh recommendations | -| `--only-update` | | | Only display base image update recommendations | -| `--org` | `string` | | Namespace of the Docker organization | -| `-o`, `--output` | `string` | | Write the report to a file | -| `--platform` | `string` | | Platform of image to analyze | -| `--ref` | `string` | | Reference to use if the provided tarball contains multiple references.<br>Can only be used with archive | -| `--tag` | `string` | | Specify tag | - - -<!---MARKER_GEN_END--> - -## Description - -The `docker scout recommendations` command display recommendations for base images updates. -It analyzes the image and display recommendations to refresh or update the base image. -For each recommendation it shows a list of benefits, such as -fewer vulnerabilities or smaller image size. - -If no image is specified, the most recently built image is used. 
- -The following artifact types are supported: - -- Images -- OCI layout directories -- Tarball archives, as created by `docker save` -- Local directory or file - -By default, the tool expects an image reference, such as: - -- `redis` -- `curlimages/curl:7.87.0` -- `mcr.microsoft.com/dotnet/runtime:7.0` - -If the artifact you want to analyze is an OCI directory, a tarball archive, a local file or directory, -or if you want to control from where the image will be resolved, you must prefix the reference with one of the following: - -- `image://` (default) use a local image, or fall back to a registry lookup -- `local://` use an image from the local image store (don't do a registry lookup) -- `registry://` use an image from a registry (don't use a local image) -- `oci-dir://` use an OCI layout directory -- `archive://` use a tarball archive, as created by `docker save` -- `fs://` use a local directory or file - -## Examples - -### Display base image update recommendations - -```console -$ docker scout recommendations golang:1.19.4 -``` - -### Display base image refresh only recommendations - -```console -$ docker scout recommendations --only-refresh golang:1.19.4 -``` - -### Display base image update only recommendations - -```console -$ docker scout recommendations --only-update golang:1.19.4 -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_repo.md b/_vendor/github.com/docker/scout-cli/docs/scout_repo.md deleted file mode 100644 index 1f2038ea757..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_repo.md +++ /dev/null @@ -1,17 +0,0 @@ -# docker scout repo - -<!---MARKER_GEN_START--> -Commands to list, enable, and disable Docker Scout on repositories - -### Subcommands - -| Name | Description | -|:-----------------------------------|:-------------------------------| -| [`disable`](scout_repo_disable.md) | Disable Docker Scout | -| [`enable`](scout_repo_enable.md) | Enable Docker Scout | -| [`list`](scout_repo_list.md) | List Docker Scout repositories | - - - -<!---MARKER_GEN_END--> - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_repo_disable.md b/_vendor/github.com/docker/scout-cli/docs/scout_repo_disable.md deleted file mode 100644 index 24842906b81..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_repo_disable.md +++ /dev/null @@ -1,43 +0,0 @@ -# docker scout repo disable - -<!---MARKER_GEN_START--> -Disable Docker Scout - -### Options - -| Name | Type | Default | Description | -|:----------------|:---------|:--------|:-----------------------------------------------------------------------------| -| `--all` | | | Disable all repositories of the organization. Can not be used with --filter. 
| -| `--filter` | `string` | | Regular expression to filter repositories by name | -| `--integration` | `string` | | Name of the integration to use for enabling an image | -| `--org` | `string` | | Namespace of the Docker organization | -| `--registry` | `string` | | Container Registry | - - -<!---MARKER_GEN_END--> - -## Examples - -### Disable a specific repository - -```console -$ docker scout repo disable my/repository -``` - -### Disable all repositories of the organization - -```console -$ docker scout repo disable --all -``` - -### Disable some repositories based on a filter - -```console -$ docker scout repo disable --filter namespace/backend -``` - -### Disable a repository from a specific registry - -```console -$ docker scout repo disable my/repository --registry 123456.dkr.ecr.us-east-1.amazonaws.com -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_repo_enable.md b/_vendor/github.com/docker/scout-cli/docs/scout_repo_enable.md deleted file mode 100644 index 3065a68bccd..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_repo_enable.md +++ /dev/null @@ -1,43 +0,0 @@ -# docker scout repo enable - -<!---MARKER_GEN_START--> -Enable Docker Scout - -### Options - -| Name | Type | Default | Description | -|:----------------|:---------|:--------|:----------------------------------------------------------------------------| -| `--all` | | | Enable all repositories of the organization. Can not be used with --filter. | -| `--filter` | `string` | | Regular expression to filter repositories by name | -| `--integration` | `string` | | Name of the integration to use for enabling an image | -| `--org` | `string` | | Namespace of the Docker organization | -| `--registry` | `string` | | Container Registry | - - -<!---MARKER_GEN_END--> - -## Examples - -### Enable a specific repository - -```console -$ docker scout repo enable my/repository -``` - -### Enable all repositories of the organization - -```console -$ docker scout repo enable --all -``` - -### Enable some repositories based on a filter - -```console -$ docker scout repo enable --filter namespace/backend -``` - -### Enable a repository from a specific registry - -```console -$ docker scout repo enable my/repository --registry 123456.dkr.ecr.us-east-1.amazonaws.com -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_repo_list.md b/_vendor/github.com/docker/scout-cli/docs/scout_repo_list.md deleted file mode 100644 index 1e2d740574e..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_repo_list.md +++ /dev/null @@ -1,18 +0,0 @@ -# docker scout repo list - -<!---MARKER_GEN_START--> -List Docker Scout repositories - -### Options - -| Name | Type | Default | Description | -|:------------------|:---------|:--------|:---------------------------------------------------------------------------| -| `--filter` | `string` | | Regular expression to filter repositories by name | -| `--only-disabled` | | | Filter to disabled repositories only | -| `--only-enabled` | | | Filter to enabled repositories only | -| `--only-registry` | `string` | | Filter to a specific registry only:<br>- hub.docker.com<br>- ecr (AWS ECR) | -| `--org` | `string` | | Namespace of the Docker organization | - - -<!---MARKER_GEN_END--> - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_sbom.md b/_vendor/github.com/docker/scout-cli/docs/scout_sbom.md deleted file mode 100644 index a335d5f83f2..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_sbom.md +++ /dev/null @@ -1,83 +0,0 @@ -# docker scout 
sbom - -<!---MARKER_GEN_START--> -Generate or display SBOM of an image - -### Options - -| Name | Type | Default | Description | -|:----------------------|:--------------|:--------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `--format` | `string` | `json` | Output format:<br>- list: list of packages of the image<br>- json: json representation of the SBOM<br>- spdx: spdx representation of the SBOM<br>- cyclonedx: cyclone dx representation of the SBOM | -| `--only-package-type` | `stringSlice` | | Comma separated list of package types (like apk, deb, rpm, npm, pypi, golang, etc)<br>Can only be used with --format list | -| `-o`, `--output` | `string` | | Write the report to a file | -| `--platform` | `string` | | Platform of image to analyze | -| `--ref` | `string` | | Reference to use if the provided tarball contains multiple references.<br>Can only be used with archive | - - -<!---MARKER_GEN_END--> - -## Description - -The `docker scout sbom` command analyzes a software artifact to generate a -Software Bill Of Materials (SBOM). - -The SBOM contains a list of all packages in the image. -You can use the `--format` flag to filter the output of the command -to display only packages of a specific type. - -If no image is specified, the most recently built image is used. - -The following artifact types are supported: - -- Images -- OCI layout directories -- Tarball archives, as created by `docker save` -- Local directory or file - -By default, the tool expects an image reference, such as: - -- `redis` -- `curlimages/curl:7.87.0` -- `mcr.microsoft.com/dotnet/runtime:7.0` - -If the artifact you want to analyze is an OCI directory, a tarball archive, a local file or directory, -or if you want to control from where the image will be resolved, you must prefix the reference with one of the following: - -- `image://` (default) use a local image, or fall back to a registry lookup -- `local://` use an image from the local image store (don't do a registry lookup) -- `registry://` use an image from a registry (don't use a local image) -- `oci-dir://` use an OCI layout directory -- `archive://` use a tarball archive, as created by `docker save` -- `fs://` use a local directory or file - -## Examples - -### Display the list of packages - -```console -$ docker scout sbom --format list alpine -``` - -### Only display packages of a specific type - -```console - $ docker scout sbom --format list --only-package-type apk alpine -``` - -### Display the full SBOM in JSON format - -```console -$ docker scout sbom alpine -``` - -### Display the full SBOM of the most recently built image - -```console -$ docker scout sbom -``` - -### Write SBOM to a file - -```console -$ docker scout sbom --output alpine.sbom alpine -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_stream.md b/_vendor/github.com/docker/scout-cli/docs/scout_stream.md deleted file mode 100644 index 886df3e6cf0..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_stream.md +++ /dev/null @@ -1,47 +0,0 @@ -# docker scout stream - -<!---MARKER_GEN_START--> -Manage streams (experimental) - -### Options - -| Name | Type | Default | Description | -|:-----------------|:---------|:--------|:-------------------------------------| -| `--org` | `string` | | Namespace of the Docker organization | -| `-o`, `--output` | `string` | | Write the report to a file | -| `--platform` | `string` 
| | Platform of image to record | - - -<!---MARKER_GEN_END--> - -## Description - -The `docker scout stream` command lists the deployment streams and records an image to it. - -Once recorded, streams can be referred to by their name, eg. in the `docker scout compare` command using `--to-stream`. - -## Examples - -### List existing streams - -```console -$ %[1]s %[2]s -prod-cluster-123 -stage-cluster-234 -``` - -### List images of a stream - -```console -$ %[1]s %[2]s prod-cluster-123 -namespace/repo:tag@sha256:9a4df4fadc9bbd44c345e473e0688c2066a6583d4741679494ba9228cfd93e1b -namespace/other-repo:tag@sha256:0001d6ce124855b0a158569c584162097fe0ca8d72519067c2c8e3ce407c580f -``` - -### Record an image to a stream, for a specific platform - -```console -$ %[1]s %[2]s stage-cluster-234 namespace/repo:stage-latest --platform linux/amd64 -✓ Pulled -✓ Successfully recorded namespace/repo:stage-latest in stream stage-cluster-234 -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_version.md b/_vendor/github.com/docker/scout-cli/docs/scout_version.md deleted file mode 100644 index 5365123c05d..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_version.md +++ /dev/null @@ -1,38 +0,0 @@ -# docker scout version - -``` -docker scout version -``` - -<!---MARKER_GEN_START--> -Show Docker Scout version information - - -<!---MARKER_GEN_END--> - -## Examples - -```console -$ docker scout version - - ⢀⢀⢀ ⣀⣀⡤⣔⢖⣖⢽⢝ - ⡠⡢⡣⡣⡣⡣⡣⡣⡢⡀ ⢀⣠⢴⡲⣫⡺⣜⢞⢮⡳⡵⡹⡅ - ⡜⡜⡜⡜⡜⡜⠜⠈⠈ ⠁⠙⠮⣺⡪⡯⣺⡪⡯⣺ - ⢘⢜⢜⢜⢜⠜ ⠈⠪⡳⡵⣹⡪⠇ - ⠨⡪⡪⡪⠂ ⢀⡤⣖⢽⡹⣝⡝⣖⢤⡀ ⠘⢝⢮⡚ _____ _ - ⠱⡱⠁ ⡴⡫⣞⢮⡳⣝⢮⡺⣪⡳⣝⢦ ⠘⡵⠁ / ____| Docker | | - ⠁ ⣸⢝⣕⢗⡵⣝⢮⡳⣝⢮⡺⣪⡳⣣ ⠁ | (___ ___ ___ _ _| |_ - ⣗⣝⢮⡳⣝⢮⡳⣝⢮⡳⣝⢮⢮⡳ \___ \ / __/ _ \| | | | __| - ⢀ ⢱⡳⡵⣹⡪⡳⣝⢮⡳⣝⢮⡳⡣⡏ ⡀ ____) | (_| (_) | |_| | |_ - ⢀⢾⠄ ⠫⣞⢮⡺⣝⢮⡳⣝⢮⡳⣝⠝ ⢠⢣⢂ |_____/ \___\___/ \__,_|\__| - ⡼⣕⢗⡄ ⠈⠓⠝⢮⡳⣝⠮⠳⠙ ⢠⢢⢣⢣ - ⢰⡫⡮⡳⣝⢦⡀ ⢀⢔⢕⢕⢕⢕⠅ - ⡯⣎⢯⡺⣪⡳⣝⢖⣄⣀ ⡀⡠⡢⡣⡣⡣⡣⡣⡃ -⢸⢝⢮⡳⣝⢮⡺⣪⡳⠕⠗⠉⠁ ⠘⠜⡜⡜⡜⡜⡜⡜⠜⠈ -⡯⡳⠳⠝⠊⠓⠉ ⠈⠈⠈⠈ - - - -version: v1.0.9 (go1.21.3 - darwin/arm64) -git commit: 8bf95bf60d084af341f70e8263342f71b0a3cd16 -``` diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_watch.md b/_vendor/github.com/docker/scout-cli/docs/scout_watch.md deleted file mode 100644 index 2444ce3c430..00000000000 --- a/_vendor/github.com/docker/scout-cli/docs/scout_watch.md +++ /dev/null @@ -1,53 +0,0 @@ -# docker scout watch - -<!---MARKER_GEN_START--> -Watch repositories in a registry and push images and indexes to Docker Scout (experimental) - -### Options - -| Name | Type | Default | Description | -|:---------------------|:--------------|:--------|:------------------------------------------------------------------------------------| -| `--all-images` | | | Push all images instead of only the ones pushed during the watch command is running | -| `--dry-run` | | | Watch images and prepare them, but do not push them | -| `--interval` | `int64` | `60` | Interval in seconds between checks | -| `--org` | `string` | | Namespace of the Docker organization to which image will be pushed | -| `--refresh-registry` | | | Refresh the list of repositories of a registry at every run. Only with --registry. | -| `--registry` | `string` | | Registry to watch | -| `--repository` | `stringSlice` | | Repository to watch | -| `--sbom` | | | Create and upload SBOMs | -| `--tag` | `stringSlice` | | Regular expression to match tags to watch | -| `--workers` | `int` | `3` | Number of concurrent workers | - - -<!---MARKER_GEN_END--> - -## Description - -The `docker scout watch` command watches repositories in a registry -and pushes images or analysis results to Docker Scout. 
- -## Examples - -### Watch for new images from two repositories and push them - -```console -$ docker scout watch --org my-org --repository registry-1.example.com/repo-1 --repository registry-2.example.com/repo-2 -``` - -### Only push images with a specific tag - -```console -$ docker scout watch --org my-org --repository registry.example.com/my-service --tag latest -``` - -### Watch all repositories of a registry - -```console -$ docker scout watch --org my-org --registry registry.example.com -``` - -### Push all images and not just the new ones - -```console -$ docker scout watch--org my-org --repository registry.example.com/my-service --all-images -``` diff --git a/_vendor/github.com/moby/buildkit/docs/attestations/slsa-definitions.md b/_vendor/github.com/moby/buildkit/docs/attestations/slsa-definitions.md index 08b692df3d3..0053e1e12d2 100644 --- a/_vendor/github.com/moby/buildkit/docs/attestations/slsa-definitions.md +++ b/_vendor/github.com/moby/buildkit/docs/attestations/slsa-definitions.md @@ -2,22 +2,436 @@ title: SLSA definitions --- -BuildKit supports the [creation of SLSA Provenance](./slsa-provenance.md) for builds that -it runs. +BuildKit supports the [creation of SLSA Provenance](./slsa-provenance.md) for +builds that it runs. The provenance format generated by BuildKit is defined by the -[SLSA Provenance format](https://slsa.dev/provenance/v0.2). +SLSA Provenance format (supports both [v0.2](https://slsa.dev/spec/v0.2/provenance) +and [v1](https://slsa.dev/spec/v1.1/provenance)). This page describes how BuildKit populate each field, and whether the field gets included when you generate attestations `mode=min` and `mode=max`. -## `builder.id` +## SLSA v1 -Corresponds to [SLSA `builder.id`](https://slsa.dev/provenance/v0.2#builder.id). +### `buildDefinition.buildType` + +* Ref: https://slsa.dev/spec/v1.1/provenance#buildType +* Included with `mode=min` and `mode=max`. + +The `buildDefinition.buildType` field is set to `https://github.com/moby/buildkit/blob/master/docs/attestations/slsa-definitions.md` +and can be used to determine the structure of the provenance content. + +```json + "buildDefinition": { + "buildType": "https://github.com/moby/buildkit/blob/master/docs/attestations/slsa-definitions.md", + ... + } +``` + +### `buildDefinition.externalParameters.configSource` + +* Ref: https://slsa.dev/spec/v1.1/provenance#externalParameters +* Included with `mode=min` and `mode=max`. + +Describes the config that initialized the build. + +```json + "buildDefinition": { + "externalParameters": { + "configSource": { + "uri": "https://github.com/moby/buildkit.git#refs/tags/v0.11.0", + "digest": { + "sha1": "4b220de5058abfd01ff619c9d2ff6b09a049bea0" + }, + "path": "Dockerfile" + }, + ... + }, + } +``` + +For builds initialized from a remote context, like a Git or HTTP URL, this +object defines the context URL and its immutable digest in the `uri` and +`digest` fields. For builds using a local frontend, such as a Dockerfile, the +`path` field defines the path for the frontend file that initialized the build +(`filename` frontend option). + +### `buildDefinition.externalParameters.request` + +* Ref: https://slsa.dev/spec/v1.1/provenance#externalParameters +* Partially included with `mode=min`. + +Describes build inputs passed to the build. 
+ +```json + "buildDefinition": { + "externalParameters": { + "request": { + "frontend": "gateway.v0", + "args": { + "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR": "1", + "label:FOO": "bar", + "source": "docker/dockerfile-upstream:master", + "target": "release" + }, + "secrets": [ + { + "id": "GIT_AUTH_HEADER", + "optional": true + }, + ... + ], + "ssh": [], + "locals": [] + }, + ... + }, + } +``` + +The following fields are included with both `mode=min` and `mode=max`: + +- `locals` lists any local sources used in the build, including the build + context and frontend file. +- `frontend` defines type of BuildKit frontend used for the build. Currently, + this can be `dockerfile.v0` or `gateway.v0`. +- `args` defines the build arguments passed to the BuildKit frontend. + + The keys inside the `args` object reflect the options as BuildKit receives + them. For example, `build-arg` and `label` prefixes are used for build + arguments and labels, and `target` key defines the target stage that was + built. The `source` key defines the source image for the Gateway frontend, if + used. + +The following fields are only included with `mode=max`: + +- `secrets` defines secrets used during the build. Note that actual secret + values are not included. +- `ssh` defines the ssh forwards used during the build. + +### `buildDefinition.internalParameters.buildConfig` + +* Ref: https://slsa.dev/spec/v1.1/provenance#internalParameters +* Only included with `mode=max`. + +Defines the build steps performed during the build. + +BuildKit internally uses LLB definition to execute the build steps. The LLB +definition of the build steps is defined in the +`buildDefinition.internalParameters.buildConfig.llbDefinition` field. + +Each LLB step is the JSON definition of the +[LLB ProtoBuf API](https://github.com/moby/buildkit/blob/v0.10.0/solver/pb/ops.proto). +The dependencies for a vertex in the LLB graph can be found in the `inputs` +field for every step. + +```json + "buildDefinition": { + "internalParameters": { + "buildConfig": { + "llbDefinition": [ + { + "id": "step0", + "op": { + "Op": { + "exec": { + "meta": { + "args": [ + "/bin/sh", + "-c", + "go build ." + ], + "env": [ + "PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "GOPATH=/go", + "GOFLAGS=-mod=vendor", + ], + "cwd": "/src", + }, + "mounts": [...] + } + }, + "platform": {...}, + }, + "inputs": [ + "step8:0", + "step2:0", + ] + }, + ... + ] + }, + } + } +``` + +### `buildDefinition.internalParameters.builderPlatform` + +* Ref: https://slsa.dev/spec/v1.1/provenance#internalParameters +* Included with `mode=min` and `mode=max`. + +```json + "buildDefinition": { + "internalParameters": { + "builderPlatform": "linux/amd64" + ... + }, + } +``` + +BuildKit sets the `builderPlatform` of the build machine. Note that this is not +necessarily the platform of the build result that can be determined from the +`in-toto` subject field. + +### `buildDefinition.resolvedDependencies` + +* Ref: https://slsa.dev/spec/v1.1/provenance#resolvedDependencies +* Included with `mode=min` and `mode=max`. + +Defines all the external artifacts that were part of the build. 
The value +depends on the type of artifact: + +- The URL of Git repositories containing source code for the image +- HTTP URLs if you are building from a remote tarball, or that was included + using an `ADD` command in Dockerfile +- Any Docker images used during the build + +The URLs to the Docker images will be in +[Package URL](https://github.com/package-url/purl-spec) format. + +All the build materials will include the immutable checksum of the artifact. +When building from a mutable tag, you can use the digest information to +determine if the artifact has been updated compared to when the build ran. + +```json + "buildDefinition": { + "resolvedDependencies": [ + { + "uri": "pkg:docker/alpine@3.17?platform=linux%2Famd64", + "digest": { + "sha256": "8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4" + } + }, + { + "uri": "https://github.com/moby/buildkit.git#refs/tags/v0.11.0", + "digest": { + "sha1": "4b220de5058abfd01ff619c9d2ff6b09a049bea0" + } + }, + ... + ], + ... + } +``` + +### `runDetails.builder.id` + +* Ref: https://slsa.dev/spec/v1.1/provenance#builder.id +* Included with `mode=min` and `mode=max`. + +The field is set to the URL of the build, if available. + +```json + "runDetails": { + "builder": { + "id": "https://github.com/docker/buildx/actions/runs/3709599520" + ... + }, + ... + } +``` + +> [!NOTE] +> This value can be set using the `builder-id` attestation parameter. + +### `runDetails.metadata.invocationID` + +* Ref: https://slsa.dev/spec/v1.1/provenance#invocationId +* Included with `mode=min` and `mode=max`. + +Unique identifier for the build invocation. When building a multi-platform image +with a single build request, this value will be the shared by all the platform +versions of the image. + +```json + "runDetails": { + "metadata": { + "invocationID": "rpv7a389uzil5lqmrgwhijwjz", + ... + }, + ... + } +``` + +### `runDetails.metadata.startedOn` + +* Ref: https://slsa.dev/spec/v1.1/provenance#startedOn +* Included with `mode=min` and `mode=max`. + +Timestamp when the build started. + +```json + "runDetails": { + "metadata": { + "startedOn": "2021-11-17T15:00:00Z", + ... + }, + ... + } +``` + +### `runDetails.metadata.finishedOn` + +* Ref: https://slsa.dev/spec/v1.1/provenance#finishedOn +* Included with `mode=min` and `mode=max`. + +Timestamp when the build finished. + +```json + "runDetails": { + "metadata": { + "finishedOn": "2021-11-17T15:01:00Z", + ... + }, + } +``` + +### `runDetails.metadata.buildkit_metadata` + +* Ref: https://slsa.dev/spec/v1.1/provenance#extension-fields +* Partially included with `mode=min`. + +This extension field defines BuildKit-specific additional metadata that is not +part of the SLSA provenance spec. + +```json + "runDetails": { + "metadata": { + "buildkit_metadata": { + "source": {...}, + "layers": {...}, + "vcs": {...}, + }, + ... + }, + } +``` + +#### `source` + +Only included with `mode=max`. + +Defines a source mapping of LLB build steps, defined in the +`buildDefinition.internalParameters.buildConfig.llbDefinition` field, to their +original source code (for example, Dockerfile commands). The `source.locations` +field contains the ranges of all the Dockerfile commands ran in an LLB step. +`source.infos` array contains the source code itself. This mapping is present +if the BuildKit frontend provided it when creating the LLB definition. + +#### `layers` + +Only included with `mode=max`. 
+ +Defines the layer mapping of LLB build step mounts defined in +`buildDefinition.internalParameters.buildConfig.llbDefinition` to the OCI +descriptors of equivalent layers. This mapping is present if the layer data was +available, usually when attestation is for an image or if the build step pulled +in image data as part of the build. + +#### `vcs` Included with `mode=min` and `mode=max`. -The `builder.id` field is set to the URL of the build, if available. +Defines optional metadata for the version control system used for the build. If +a build uses a remote context from Git repository, BuildKit extracts the details +of the version control system automatically and displays it in the +`buildDefinition.externalParameters.configSource` field. But if the build uses +a source from a local directory, the VCS information is lost even if the +directory contained a Git repository. In this case, the build client can send +additional `vcs:source` and `vcs:revision` build options and BuildKit will add +them to the provenance attestations as extra metadata. Note that, contrary to +the `buildDefinition.externalParameters.configSource` field, BuildKit doesn't +verify the `vcs` values, and as such they can't be trusted and should only be +used as a metadata hint. + +### `runDetails.metadata.buildkit_hermetic` + +* Ref: https://slsa.dev/spec/v1.1/provenance#extension-fields +* Included with `mode=min` and `mode=max`. + +This extension field is set to true if the build was hermetic and did not access +the network. In Dockerfiles, a build is hermetic if it does not use `RUN` +commands or disables network with `--network=none` flag. + +```json + "runDetails": { + "metadata": { + "buildkit_hermetic": true, + ... + }, + } +``` + +### `runDetails.metadata.buildkit_completeness` + +* Ref: https://slsa.dev/spec/v1.1/provenance#extension-fields +* Included with `mode=min` and `mode=max`. + +This extension field defines if the provenance information is complete. It is +similar to `metadata.completeness` field in SLSA v0.2. + +`buildkit_completeness.request` is true if all the build arguments are included +in the `buildDefinition.externalParameters.request` field. When building with +`min` mode, the build arguments are not included in the provenance information +and request is not complete. Request is also not complete on direct LLB builds +that did not use a frontend. + +`buildkit_completeness.resolvedDependencies` is true if +`buildDefinition.resolvedDependencies` field includes all the dependencies of +the build. When building from un-tracked source in a local directory, the +dependencies are not complete, while when building from a remote Git repository +all dependencies can be tracked by BuildKit and +`buildkit_completeness.resolvedDependencies` is true. + +```json + "runDetails": { + "metadata": { + "buildkit_completeness": { + "request": true, + "resolvedDependencies": true + }, + ... + }, + } +``` + +### `runDetails.metadata.buildkit_reproducible` + +* Ref: https://slsa.dev/spec/v1.1/provenance#extension-fields +* Included with `mode=min` and `mode=max`. + +This extension field defines if the build result is supposed to be byte-by-byte +reproducible. It is similar to `metadata.reproducible` field in SLSA v0.2. This +value can be set by the user with the `reproducible=true` attestation parameter. + +```json + "runDetails": { + "metadata": { + "buildkit_reproducible": false, + ... 
+ }, + } +``` + +## SLSA v0.2 + +### `builder.id` + +* Ref: https://slsa.dev/spec/v0.2/provenance#builder.id +* Included with `mode=min` and `mode=max`. + +The field is set to the URL of the build, if available. ```json "builder": { @@ -25,26 +439,25 @@ The `builder.id` field is set to the URL of the build, if available. }, ``` -This value can be set using the `builder-id` attestation parameter. - -## `buildType` +> [!NOTE] +> This value can be set using the `builder-id` attestation parameter. -Corresponds to [SLSA `buildType`](https://slsa.dev/provenance/v0.2#buildType). +### `buildType` -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#buildType +* Included with `mode=min` and `mode=max`. -The `buildType` field is set to `https://mobyproject.org/buildkit@v1` can be +The `buildType` field is set to `https://mobyproject.org/buildkit@v1` and can be used to determine the structure of the provenance content. ```json "buildType": "https://mobyproject.org/buildkit@v1", ``` -## `invocation.configSource` +### `invocation.configSource` -Corresponds to [SLSA `invocation.configSource`](https://slsa.dev/provenance/v0.2#invocation.configSource). - -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#invocation.configSource +* Included with `mode=min` and `mode=max`. Describes the config that initialized the build. @@ -62,15 +475,15 @@ Describes the config that initialized the build. ``` For builds initialized from a remote context, like a Git or HTTP URL, this -object defines the context URL and its immutable digest in the `uri` and `digest` fields. -For builds using a local frontend, such as a Dockerfile, the `entryPoint` field defines the path -for the frontend file that initialized the build (`filename` frontend option). +object defines the context URL and its immutable digest in the `uri` and +`digest` fields. For builds using a local frontend, such as a Dockerfile, the +`entryPoint` field defines the path for the frontend file that initialized the +build (`filename` frontend option). -## `invocation.parameters` +### `invocation.parameters` -Corresponds to [SLSA `invocation.parameters`](https://slsa.dev/provenance/v0.2#invocation.parameters). - -Partially included with `mode=min`. +* Ref: https://slsa.dev/spec/v0.2/provenance#invocation.parameters +* Partially included with `mode=min`. Describes build inputs passed to the build. @@ -118,11 +531,10 @@ The following fields are only included with `mode=max`: values are not included. - `ssh` defines the ssh forwards used during the build. -## `invocation.environment` +### `invocation.environment` -Corresponds to [SLSA `invocation.environment`](https://slsa.dev/provenance/v0.2#invocation.environment). - -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#invocation.environment +* Included with `mode=min` and `mode=max`. ```json "invocation": { @@ -137,11 +549,10 @@ The only value BuildKit currently sets is the `platform` of the current build machine. Note that this is not necessarily the platform of the build result that can be determined from the `in-toto` subject field. -## `materials` - -Corresponds to [SLSA `materials`](https://slsa.dev/provenance/v0.2#materials). +### `materials` -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#materials +* Included with `mode=min` and `mode=max`. Defines all the external artifacts that were part of the build. 
The value depends on the type of artifact: @@ -176,11 +587,10 @@ determine if the artifact has been updated compared to when the build ran. ], ``` -## `buildConfig` +### `buildConfig` -Corresponds to [SLSA `buildConfig`](https://slsa.dev/provenance/v0.2#buildConfig). - -Only included with `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#buildConfig +* Only included with `mode=max`. Defines the build steps performed during the build. @@ -228,11 +638,10 @@ field for every step. }, ``` -## `metadata.buildInvocationId` +### `metadata.buildInvocationId` -Corresponds to [SLSA `metadata.buildInvocationId`](https://slsa.dev/provenance/v0.2#metadata.buildIncocationId). - -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#buildInvocationId +* Included with `mode=min` and `mode=max`. Unique identifier for the build invocation. When building a multi-platform image with a single build request, this value will be the shared by all the platform @@ -245,11 +654,10 @@ versions of the image. }, ``` -## `metadata.buildStartedOn` +### `metadata.buildStartedOn` -Corresponds to [SLSA `metadata.buildStartedOn`](https://slsa.dev/provenance/v0.2#metadata.buildStartedOn). - -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#buildStartedOn +* Included with `mode=min` and `mode=max`. Timestamp when the build started. @@ -260,11 +668,10 @@ Timestamp when the build started. }, ``` -## `metadata.buildFinishedOn` - -Corresponds to [SLSA `metadata.buildFinishedOn`](https://slsa.dev/provenance/v0.2#metadata.buildFinishedOn). +### `metadata.buildFinishedOn` -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#buildFinishedOn +* Included with `mode=min` and `mode=max`. Timestamp when the build finished. @@ -275,19 +682,18 @@ Timestamp when the build finished. }, ``` -## `metadata.completeness` - -Corresponds to [SLSA `metadata.completeness`](https://slsa.dev/provenance/v0.2#metadata.completeness). +### `metadata.completeness` -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#metadata.completeness +* Included with `mode=min` and `mode=max`. Defines if the provenance information is complete. `completeness.parameters` is true if all the build arguments are included in the -`invocation.parameters` field. When building with `min` mode, the build -arguments are not included in the provenance information and parameters are not -complete. Parameters are also not complete on direct LLB builds that did not use -a frontend. +`parameters` field. When building with `min` mode, the build arguments are not +included in the provenance information and parameters are not complete. +Parameters are also not complete on direct LLB builds that did not use a +frontend. `completeness.environment` is always true for BuildKit builds. @@ -308,9 +714,10 @@ is true. }, ``` -## `metadata.reproducible` +### `metadata.reproducible` -Corresponds to [SLSA `metadata.reproducible`](https://slsa.dev/provenance/v0.2#metadata.reproducible). +* Ref: https://slsa.dev/spec/v0.2/provenance#metadata.reproducible +* Included with `mode=min` and `mode=max`. Defines if the build result is supposed to be byte-by-byte reproducible. This value can be set by the user with the `reproducible=true` attestation parameter. @@ -322,7 +729,7 @@ value can be set by the user with the `reproducible=true` attestation parameter. 
}, ``` -## `metadata.https://mobyproject.org/buildkit@v1#hermetic` +### `metadata.https://mobyproject.org/buildkit@v1#hermetic` Included with `mode=min` and `mode=max`. @@ -337,7 +744,7 @@ commands or disables network with `--network=none` flag. }, ``` -## `metadata.https://mobyproject.org/buildkit@v1#metadata` +### `metadata.https://mobyproject.org/buildkit@v1#metadata` Partially included with `mode=min`. @@ -355,7 +762,7 @@ part of the SLSA provenance spec. }, ``` -### `source` +#### `source` Only included with `mode=max`. @@ -366,7 +773,7 @@ the Dockerfile commands ran in an LLB step. `source.infos` array contains the source code itself. This mapping is present if the BuildKit frontend provided it when creating the LLB definition. -### `layers` +#### `layers` Only included with `mode=max`. @@ -375,7 +782,7 @@ Defines the layer mapping of LLB build step mounts defined in mapping is present if the layer data was available, usually when attestation is for an image or if the build step pulled in image data as part of the build. -### `vcs` +#### `vcs` Included with `mode=min` and `mode=max`. @@ -389,227 +796,3 @@ repository. In this case, the build client can send additional `vcs:source` and attestations as extra metadata. Note that, contrary to the `invocation.configSource` field, BuildKit doesn't verify the `vcs` values, and as such they can't be trusted and should only be used as a metadata hint. - -## Output - -To inspect the provenance that was generated and attached to a container image, -you can use the `docker buildx imagetools` command to inspect the image in a -registry. Inspecting the attestation displays the format described in the -[attestation storage specification](./attestation-storage.md). - -For example, inspecting a simple Docker image based on `alpine:latest` results -in a provenance attestation similar to the following, for a `mode=min` build: - -```json -{ - "_type": "https://in-toto.io/Statement/v0.1", - "predicateType": "https://slsa.dev/provenance/v0.2", - "subject": [ - { - "name": "pkg:docker/<registry>/<image>@<tag/digest>?platform=<platform>", - "digest": { - "sha256": "e8275b2b76280af67e26f068e5d585eb905f8dfd2f1918b3229db98133cb4862" - } - } - ], - "predicate": { - "builder": { - "id": "" - }, - "buildType": "https://mobyproject.org/buildkit@v1", - "materials": [ - { - "uri": "pkg:docker/alpine@latest?platform=linux%2Famd64", - "digest": { - "sha256": "8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4" - } - } - ], - "invocation": { - "configSource": { - "entryPoint": "Dockerfile" - }, - "parameters": { - "frontend": "dockerfile.v0", - "args": {}, - "locals": [ - { - "name": "context" - }, - { - "name": "dockerfile" - } - ] - }, - "environment": { - "platform": "linux/amd64" - } - }, - "metadata": { - "buildInvocationID": "yirbp1aosi1vqjmi3z6bc75nb", - "buildStartedOn": "2022-12-08T11:48:59.466513707Z", - "buildFinishedOn": "2022-12-08T11:49:01.256820297Z", - "reproducible": false, - "completeness": { - "parameters": true, - "environment": true, - "materials": false - }, - "https://mobyproject.org/buildkit@v1#metadata": {} - } - } -} -``` - -For a similar build, but with `mode=max`: - -```json -{ - "_type": "https://in-toto.io/Statement/v0.1", - "predicateType": "https://slsa.dev/provenance/v0.2", - "subject": [ - { - "name": "pkg:docker/<registry>/<image>@<tag/digest>?platform=<platform>", - "digest": { - "sha256": "e8275b2b76280af67e26f068e5d585eb905f8dfd2f1918b3229db98133cb4862" - } - } - ], - "predicate": { - "builder": { - "id": "" - }, - 
"buildType": "https://mobyproject.org/buildkit@v1", - "materials": [ - { - "uri": "pkg:docker/alpine@latest?platform=linux%2Famd64", - "digest": { - "sha256": "8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4" - } - } - ], - "invocation": { - "configSource": { - "entryPoint": "Dockerfile" - }, - "parameters": { - "frontend": "dockerfile.v0", - "args": {}, - "locals": [ - { - "name": "context" - }, - { - "name": "dockerfile" - } - ] - }, - "environment": { - "platform": "linux/amd64" - } - }, - "buildConfig": { - "llbDefinition": [ - { - "id": "step0", - "op": { - "Op": { - "source": { - "identifier": "docker-image://docker.io/library/alpine:latest@sha256:8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4" - } - }, - "platform": { - "Architecture": "amd64", - "OS": "linux" - }, - "constraints": {} - } - }, - { - "id": "step1", - "op": { - "Op": null - }, - "inputs": ["step0:0"] - } - ] - }, - "metadata": { - "buildInvocationID": "46ue2x93k3xj5l463dektwldw", - "buildStartedOn": "2022-12-08T11:50:54.953375437Z", - "buildFinishedOn": "2022-12-08T11:50:55.447841328Z", - "reproducible": false, - "completeness": { - "parameters": true, - "environment": true, - "materials": false - }, - "https://mobyproject.org/buildkit@v1#metadata": { - "source": { - "locations": { - "step0": { - "locations": [ - { - "ranges": [ - { - "start": { - "line": 1 - }, - "end": { - "line": 1 - } - } - ] - } - ] - } - }, - "infos": [ - { - "filename": "Dockerfile", - "data": "RlJPTSBhbHBpbmU6bGF0ZXN0Cg==", - "llbDefinition": [ - { - "id": "step0", - "op": { - "Op": { - "source": { - "identifier": "local://dockerfile", - "attrs": { - "local.differ": "none", - "local.followpaths": "[\"Dockerfile\",\"Dockerfile.dockerignore\",\"dockerfile\"]", - "local.session": "q2jnwdkas0i0iu4knchd92jaz", - "local.sharedkeyhint": "dockerfile" - } - } - }, - "constraints": {} - } - }, - { - "id": "step1", - "op": { - "Op": null - }, - "inputs": ["step0:0"] - } - ] - } - ] - }, - "layers": { - "step0:0": [ - [ - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "digest": "sha256:c158987b05517b6f2c5913f3acef1f2182a32345a304fe357e3ace5fadcad715", - "size": 3370706 - } - ] - ] - } - } - } - } -} -``` diff --git a/_vendor/github.com/moby/buildkit/docs/buildkitd.toml.md b/_vendor/github.com/moby/buildkit/docs/buildkitd.toml.md index ec314b9f08b..00eef1ddbcb 100644 --- a/_vendor/github.com/moby/buildkit/docs/buildkitd.toml.md +++ b/_vendor/github.com/moby/buildkit/docs/buildkitd.toml.md @@ -20,7 +20,12 @@ trace = true # root is where all buildkit state is stored. root = "/var/lib/buildkit" # insecure-entitlements allows insecure entitlements, disabled by default. -insecure-entitlements = [ "network.host", "security.insecure" ] +insecure-entitlements = [ "network.host", "security.insecure", "device" ] +# provenanceEnvDir is the directory where extra config is loaded that is added +# to the provenance of builds: +# slsa v0.2: invocation.environment.* +# slsa v1: buildDefinition.internalParameters.* +provenanceEnvDir = "/etc/buildkit/provenance.d" [log] # log formatter: json or text @@ -69,7 +74,6 @@ insecure-entitlements = [ "network.host", "security.insecure" ] # Whether run subprocesses in main pid namespace or not, this is useful for # running rootless buildkit inside a container. 
noProcessSandbox = false - # gc enables/disables garbage collection gc = true # reservedSpace is the minimum amount of disk space guaranteed to be @@ -87,7 +91,6 @@ insecure-entitlements = [ "network.host", "security.insecure" ] # collector will attempt to leave - however, it will never be bought below # reservedSpace. minFreeSpace = "20GB" - # alternate OCI worker binary name(example 'crun'), by default either # buildkit-runc or runc binary is used binary = "" @@ -116,7 +119,6 @@ insecure-entitlements = [ "network.host", "security.insecure" ] # collector will attempt to leave - however, it will never be bought below # reservedSpace. minFreeSpace = "10GB" - # keepDuration can be an integer number of seconds (e.g. 172800), or a # string duration (e.g. "48h") keepDuration = "48h" @@ -148,7 +150,8 @@ insecure-entitlements = [ "network.host", "security.insecure" ] # collector will attempt to leave - however, it will never be bought below # reservedSpace. minFreeSpace = "20GB" - + # limit the number of parallel build steps that can run at the same time + max-parallelism = 4 # maintain a pool of reusable CNI network namespaces to amortize the overhead # of allocating and releasing the namespaces cniPoolSize = 16 @@ -176,8 +179,13 @@ insecure-entitlements = [ "network.host", "security.insecure" ] [registry."docker.io"] # mirror configuration to handle path in case a mirror registry requires a /project path rather than just a host:port mirrors = ["yourmirror.local:5000", "core.harbor.domain/proxy.docker.io"] + # Use plain HTTP to connect to the mirrors. http = true + # Use HTTPS with self-signed certificates. Do not enable this together with `http`. insecure = true + # If you use token auth with self-signed certificates, + # then buildctl also needs to trust the token provider CA (for example, certificates that are configured for registry) + # because buildctl pulls tokens directly without daemon process ca=["/etc/config/myca.pem"] [[registry."docker.io".keypair]] key="/etc/config/key.pem" @@ -193,7 +201,6 @@ insecure-entitlements = [ "network.host", "security.insecure" ] [frontend."gateway.v0"] enabled = true - # If allowedRepositories is empty, all gateway sources are allowed. # Otherwise, only the listed repositories are allowed as a gateway source. # @@ -207,4 +214,18 @@ insecure-entitlements = [ "network.host", "security.insecure" ] # how often buildkit scans for changes in the supported emulated platforms platformsCacheMaxAge = "1h" + +# optional signed cache configuration for GitHub Actions backend +[ghacache.sign] +# command that signs the payload in stdin and outputs the signature to stdout. Normally you want cosign to produce the signature bytes. +cmd = "" +[ghacache.verify] +required = false +[ghacache.verify.policy] +timestampThreshold = 1 +tlogThreshold = 1 +# cetificate properties that need to match. Simple wildcards (*) are supported. +certificateIssuer = "" +subjectAlternativeName = "" +buildSignerURI = "" ``` diff --git a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/reference.md b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/reference.md index c5105e52487..997e8a6abf7 100644 --- a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/reference.md +++ b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/reference.md @@ -373,10 +373,16 @@ whitespace, like `${foo}_bar`. 
The `${variable_name}` syntax also supports a few of the standard `bash` modifiers as specified below: -- `${variable:-word}` indicates that if `variable` is set then the result - will be that value. If `variable` is not set then `word` will be the result. -- `${variable:+word}` indicates that if `variable` is set then `word` will be - the result, otherwise the result is the empty string. +- `${variable:-word}` indicates that if `variable` is set and non-empty then + the result will be that value. If `variable` is unset or empty then `word` + will be the result. +- `${variable-word}` indicates that if `variable` is set (even if empty) then + the result will be that value. If `variable` is unset then `word` will be + the result. +- `${variable:+word}` indicates that if `variable` is set and non-empty then + `word` will be the result, otherwise the result is the empty string. +- `${variable+word}` indicates that if `variable` is set (even if empty) then + `word` will be the result, otherwise the result is the empty string. The following variable replacements are supported in a pre-release version of Dockerfile syntax, when using the `# syntax=docker/dockerfile-upstream:master` syntax @@ -568,8 +574,8 @@ You can also use heredocs with the shell form to break up supported commands. ```dockerfile RUN <<EOF -source $HOME/.bashrc && \ -echo $HOME + source $HOME/.bashrc + echo $HOME EOF ``` @@ -621,6 +627,20 @@ The image can be any valid image. [`COPY --from=<name>`](#copy---from), and [`RUN --mount=type=bind,from=<name>`](#run---mounttypebind) instructions to refer to the image built in this stage. + + Using a previous build stage as the base for a subsequent stage is a common + pattern for sharing a common base environment: + + ```dockerfile + FROM ubuntu AS base + RUN apt-get update && apt-get install -y shared-tooling + + FROM base AS dev + RUN apt-get install -y dev-tooling + + FROM base AS prod + COPY --from=build /app /app + ``` - The `tag` or `digest` values are optional. If you omit either of them, the builder assumes a `latest` tag by default. The builder returns an error if it can't find the `tag` value. @@ -693,7 +713,7 @@ The available `[OPTIONS]` for the `RUN` instruction are: | [`--device`](#run---device) | 1.14-labs | | [`--mount`](#run---mount) | 1.2 | | [`--network`](#run---network) | 1.3 | -| [`--security`](#run---security) | 1.1.2-labs | +| [`--security`](#run---security) | 1.20 | ### Cache invalidation for RUN instructions @@ -721,6 +741,12 @@ RUN --device=name,[required] `RUN --device` allows build to request [CDI devices](https://github.com/moby/buildkit/blob/master/docs/cdi.md) to be available to the build step. +> [!WARNING] +> The use of `--device` is protected by the `device` entitlement, which needs +> to be enabled when starting the buildkitd daemon with +> `--allow-insecure-entitlement device` flag or in [buildkitd config](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md), +> and for a build request with [`--allow device` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/#allow). + The device `name` is provided by the CDI specification registered in BuildKit. 
In the following example, multiple devices are registered in the CDI @@ -752,6 +778,8 @@ devices: containerEdits: env: - QUX=injected +annotations: + org.mobyproject.buildkit.device.autoallow: true ``` The device name format is flexible and accepts various patterns to support @@ -762,6 +790,14 @@ multiple device configurations: * `vendor1.com/device=*`: request all devices for this vendor * `class1`: request devices by `org.mobyproject.buildkit.device.class` annotation +> [!NOTE] +> Annotations are supported by the CDI specification since 0.6.0. + +> [!NOTE] +> To automatically allow all devices registered in the CDI specification, you +> can set the `org.mobyproject.buildkit.device.autoallow` annotation. You can +> also set this annotation for a specific device. + #### Example: CUDA-Powered LLaMA Inference In this example we use the `--device` flag to run `llama.cpp` inference using @@ -817,12 +853,13 @@ The supported mount types are: This mount type allows binding files or directories to the build container. A bind mount is read-only by default. -| Option | Description | -| ---------------------------------- | ---------------------------------------------------------------------------------------------- | -| `target`, `dst`, `destination`[^1] | Mount path. | -| `source` | Source path in the `from`. Defaults to the root of the `from`. | -| `from` | Build stage, context, or image name for the root of the source. Defaults to the build context. | -| `rw`,`readwrite` | Allow writes on the mount. Written data will be discarded. | +| Option | Description | +| ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | +| `target`, `dst`, `destination`[^1] | Mount path. | +| `source` | Source path in the `from`. Defaults to the root of the `from`. | +| `from` | Build stage, context, or image name for the root of the source. Defaults to the build context. | +| `rw`,`readwrite` | Allow writes on the mount. Written data will be discarded after the `RUN` instruction completes and will not be committed to the image layer. | + ### RUN --mount=type=cache @@ -864,7 +901,7 @@ FROM ubuntu RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt update && apt-get --no-install-recommends install -y gcc + apt-get update && apt-get --no-install-recommends install -y gcc ``` Apt needs exclusive access to its data, so the caches use the option @@ -1023,9 +1060,6 @@ The command is run in the host's network environment (similar to ### RUN --security -> [!NOTE] -> Not yet available in stable syntax, use [`docker/dockerfile:1-labs`](#syntax) version. - ```dockerfile RUN --security=<sandbox|insecure> ``` @@ -1046,7 +1080,7 @@ Default sandbox mode can be activated via `--security=sandbox`, but that is no-o #### Example: check entitlements ```dockerfile -# syntax=docker/dockerfile:1-labs +# syntax=docker/dockerfile:1 FROM ubuntu RUN --security=insecure cat /proc/self/status | grep CapEff ``` @@ -1131,6 +1165,13 @@ Labels included in base images (images in the `FROM` line) are inherited by your image. If a label already exists but with a different value, the most-recently-applied value overrides any previously-set value. 
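+
+For example, if a hypothetical base image `mybase` already sets a `vendor`
+label, the value applied in your Dockerfile replaces the inherited one:
+
+```dockerfile
+# Assumes mybase sets LABEL vendor="Upstream Inc" (illustrative name).
+FROM mybase
+# The most recently applied value wins in the final image.
+LABEL vendor="ACME Incorporated"
+```
+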
+In a multi-stage build, labels from intermediate stages are only present in +the final image if the final stage is directly or indirectly based on them +(via `FROM`). Labels from a stage that you only reference with +`COPY --from` or `RUN --mount=from=` are not included in the output image. +Labels from the base image specified in the final `FROM` instruction are +always inherited. + To view an image's labels, use the `docker image inspect` command. You can use the `--format` option to show just the labels; @@ -1309,10 +1350,11 @@ The available `[OPTIONS]` are: | --------------------------------------- | -------------------------- | | [`--keep-git-dir`](#add---keep-git-dir) | 1.1 | | [`--checksum`](#add---checksum) | 1.6 | -| [`--chown`](#add---chown---chmod) | | -| [`--chmod`](#add---chown---chmod) | 1.2 | +| [`--chmod`](#add---chmod) | 1.2 | +| [`--chown`](#add---chown) | | | [`--link`](#add---link) | 1.4 | -| [`--exclude`](#add---exclude) | 1.7-labs | +| [`--unpack`](#add---unpack) | 1.17 | +| [`--exclude`](#add---exclude) | 1.19 | The `ADD` instruction copies new files or directories from `<src>` and adds them to the filesystem of the image at the path `<dest>`. Files and directories @@ -1418,9 +1460,8 @@ ADD arr[[]0].txt /dest/ When using a local tar archive as the source for `ADD`, and the archive is in a recognized compression format (`gzip`, `bzip2` or `xz`, or uncompressed), the -archive is decompressed and extracted into the specified destination. Only -local tar archives are extracted. If the tar archive is a remote URL, the -archive is not extracted, but downloaded and placed at the destination. +archive is decompressed and extracted into the specified destination. Local tar +archives are extracted by default, see the [`ADD --unpack` flag]. When a directory is extracted, it has the same behavior as `tar -x`. The result is the union of: @@ -1445,6 +1486,9 @@ file. However, like any other file processed during an `ADD`, `mtime` isn't included in the determination of whether or not the file has changed and the cache should be updated. +If remote file is a tar archive, the archive is not extracted by default. To +download and extract the archive, use the [`ADD --unpack` flag]. + If the destination ends with a trailing slash, then the filename is inferred from the URL path. For example, `ADD http://example.com/foobar /` would create the file `/foobar`. The URL must have a nontrivial path so that an appropriate @@ -1557,24 +1601,51 @@ ADD --keep-git-dir=true https://github.com/moby/buildkit.git#v0.10.1 /buildkit ADD [--checksum=<hash>] <src> ... <dir> ``` -The `--checksum` flag lets you verify the checksum of a remote resource. The -checksum is formatted as `sha256:<hash>`. SHA-256 is the only supported hash -algorithm. +The `--checksum` flag lets you verify the checksum of a remote Git or HTTP +resource: + +- For Git sources, the checksum is the commit SHA. It can be the full commit + SHA or match on the prefix (1 or more characters). +- For HTTP sources, the checksum is the SHA-256 content digest, formatted as + `sha256:<hash>`. SHA-256 is the only supported hash algorithm. ```dockerfile +ADD --checksum=be1f38e https://github.com/moby/buildkit.git#v0.26.2 / ADD --checksum=sha256:24454f830cdb571e2c4ad15481119c43b3cafd48dd869a9b2945d1036d1dc68d https://mirrors.edge.kernel.org/pub/linux/kernel/Historic/linux-0.01.tar.gz / ``` -The `--checksum` flag only supports HTTP(S) sources. +### ADD --chmod -### ADD --chown --chmod +See [`COPY --chmod`](#copy---chmod). 
-See [`COPY --chown --chmod`](#copy---chown---chmod). +### ADD --chown + +See [`COPY --chown`](#copy---chown). ### ADD --link See [`COPY --link`](#copy---link). +### ADD --unpack + +```dockerfile +ADD [--unpack=<bool>] <src> ... <dir> +``` + +The `--unpack` flag controls whether or not to automatically unpack tar +archives (including compressed formats like `gzip` or `bzip2`) when adding them +to the image. Local tar archives are unpacked by default, whereas remote tar +archives (where `src` is a URL) are downloaded without unpacking. + +```dockerfile +# syntax=docker/dockerfile:1 +FROM alpine +# Download and unpack archive.tar.gz into /download: +ADD --unpack=true https://example.com/archive.tar.gz /download +# Add local tar without unpacking: +ADD --unpack=false my-archive.tar.gz . +``` + ### ADD --exclude See [`COPY --exclude`](#copy---exclude). @@ -1594,11 +1665,11 @@ The available `[OPTIONS]` are: | Option | Minimum Dockerfile version | | ---------------------------------- | -------------------------- | | [`--from`](#copy---from) | | -| [`--chown`](#copy---chown---chmod) | | -| [`--chmod`](#copy---chown---chmod) | 1.2 | +| [`--chmod`](#copy---chmod) | 1.2 | +| [`--chown`](#copy---chown) | | | [`--link`](#copy---link) | 1.4 | -| [`--parents`](#copy---parents) | 1.7-labs | -| [`--exclude`](#copy---exclude) | 1.7-labs | +| [`--parents`](#copy---parents) | 1.20 | +| [`--exclude`](#copy---exclude) | 1.19 | The `COPY` instruction copies new files or directories from `<src>` and adds them to the filesystem of the image at the path `<dest>`. Files and directories @@ -1765,32 +1836,52 @@ COPY --from=nginx:latest /etc/nginx/nginx.conf /nginx.conf The source path of `COPY --from` is always resolved from filesystem root of the image or stage that you specify. -### COPY --chown --chmod +### COPY --chmod -> [!NOTE] -> Only octal notation is currently supported. Non-octal support is tracked in -> [moby/buildkit#1951](https://github.com/moby/buildkit/issues/1951). +```dockerfile +COPY [--chmod=<perms>] <src> ... <dest> +``` + +The `--chmod` flag supports octal notation (e.g., `755`, `644`) and symbolic +notation (e.g., `+x`, `g=u`). Symbolic notation (added in Dockerfile version 1.14) +is useful when octal isn't flexible enough. For example, `u=rwX,go=rX` sets +directories to 755 and files to 644, while preserving the executable bit on files +that already have it. (Capital `X` means "executable only if it's a directory or +already executable.") + +For more information about symbolic notation syntax, see the +[chmod(1) manual](https://man.freebsd.org/cgi/man.cgi?chmod). + +Examples using octal notation: + +```dockerfile +COPY --chmod=755 app.sh /app/ +COPY --chmod=644 file.txt /data/ +ARG MODE=440 +COPY --chmod=$MODE . . +``` + +Examples using symbolic notation: ```dockerfile -COPY [--chown=<user>:<group>] [--chmod=<perms> ...] <src> ... <dest> +COPY --chmod=+x script.sh /app/ +COPY --chmod=u=rwX,go=rX . /app/ +COPY --chmod=g=u config/ /config/ ``` -The `--chown` and `--chmod` features are only supported on Dockerfiles used to build Linux containers, -and doesn't work on Windows containers. Since user and group ownership concepts do -not translate between Linux and Windows, the use of `/etc/passwd` and `/etc/group` for -translating user and group names to IDs restricts this feature to only be viable for -Linux OS-based containers. +The `--chmod` flag is not supported when building Windows containers. 
-All files and directories copied from the build context are created with a UID and GID of `0` unless the -optional `--chown` flag specifies a given username, groupname, or UID/GID -combination to request specific ownership of the copied content. The -format of the `--chown` flag allows for either username and groupname strings -or direct integer UID and GID in any combination. Providing a username without -groupname or a UID without GID will use the same numeric UID as the GID. If a -username or groupname is provided, the container's root filesystem -`/etc/passwd` and `/etc/group` files will be used to perform the translation -from name to integer UID or GID respectively. The following examples show -valid definitions for the `--chown` flag: +### COPY --chown + +```dockerfile +COPY [--chown=<user>:<group>] <src> ... <dest> +``` + +Sets ownership of copied files. Without this flag, files are created with UID +and GID of 0. + +The flag accepts usernames, group names, UIDs, or GIDs in any combination. +If you specify only a user, the GID is set to the same numeric value as the UID. ```dockerfile COPY --chown=55:mygroup files* /somedir/ @@ -1800,22 +1891,12 @@ COPY --chown=10:11 files* /somedir/ COPY --chown=myuser:mygroup --chmod=644 files* /somedir/ ``` -If the container root filesystem doesn't contain either `/etc/passwd` or -`/etc/group` files and either user or group names are used in the `--chown` -flag, the build will fail on the `COPY` operation. Using numeric IDs requires -no lookup and does not depend on container root filesystem content. +When using names instead of numeric IDs, BuildKit resolves them using +`/etc/passwd` and `/etc/group` in the container's root filesystem. If these +files are missing or don't contain the specified names, the build fails. +Numeric IDs don't require this lookup. -With the Dockerfile syntax version 1.10.0 and later, -the `--chmod` flag supports variable interpolation, -which lets you define the permission bits using build arguments: - -```dockerfile -# syntax=docker/dockerfile:1.10 -FROM alpine -WORKDIR /src -ARG MODE=440 -COPY --chmod=$MODE . . -``` +The `--chown` flag is not supported when building Windows containers. ### COPY --link @@ -1888,9 +1969,6 @@ conditions for cache reuse. ### COPY --parents -> [!NOTE] -> Not yet available in stable syntax, use [`docker/dockerfile:1.7-labs`](#syntax) version. - ```dockerfile COPY [--parents[=<boolean>]] <src> ... <dest> ``` @@ -1898,7 +1976,7 @@ COPY [--parents[=<boolean>]] <src> ... <dest> The `--parents` flag preserves parent directories for `src` entries. This flag defaults to `false`. ```dockerfile -# syntax=docker/dockerfile:1-labs +# syntax=docker/dockerfile:1 FROM scratch COPY ./x/a.txt ./y/a.txt /no_parents/ @@ -1918,7 +1996,7 @@ directories after it will be preserved. This may be especially useful copies bet with `--from` where the source paths need to be absolute. 
```dockerfile -# syntax=docker/dockerfile:1-labs +# syntax=docker/dockerfile:1 FROM scratch COPY --parents ./x/./y/*.txt /parents/ @@ -1932,6 +2010,26 @@ COPY --parents ./x/./y/*.txt /parents/ # /parents/y/b.txt ``` +The `**` wildcard matches any number of path components, including none, and +can be used to recursively match files across directory levels: + +```dockerfile +# syntax=docker/dockerfile:1 +FROM scratch + +COPY --parents ./src/**/*.txt /parents/ + +# Build context: +# ./src/a.txt +# ./src/x/b.txt +# ./src/x/y/c.txt +# +# Output: +# /parents/src/a.txt +# /parents/src/x/b.txt +# /parents/src/x/y/c.txt +``` + Note that, without the `--parents` flag specified, any filename collision will fail the Linux `cp` operation with an explicit error message (`cp: will not overwrite just-created './x/a.txt' with './y/a.txt'`), where the @@ -1945,9 +2043,6 @@ with the `--parents` flag, the Buildkit is capable of packing multiple ### COPY --exclude -> [!NOTE] -> Not yet available in stable syntax, use [`docker/dockerfile:1.7-labs`](#syntax) version. - ```dockerfile COPY [--exclude=<path> ...] <src> ... <dest> ``` @@ -1960,7 +2055,7 @@ supporting wildcards and matching using Go's For example, to add all files starting with "hom", excluding files with a `.txt` extension: ```dockerfile -# syntax=docker/dockerfile:1-labs +# syntax=docker/dockerfile:1 FROM scratch COPY --exclude=*.txt hom* /mydir/ @@ -1972,7 +2067,7 @@ even if the files paths match the pattern specified in `<src>`. To add all files starting with "hom", excluding files with either `.txt` or `.md` extensions: ```dockerfile -# syntax=docker/dockerfile:1-labs +# syntax=docker/dockerfile:1 FROM scratch COPY --exclude=*.txt --exclude=*.md hom* /mydir/ @@ -2013,8 +2108,8 @@ This allows arguments to be passed to the entry point, i.e., `docker run <image> -d` will pass the `-d` argument to the entry point. You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint` flag. -The shell form of `ENTRYPOINT` prevents any `CMD` command line arguments from -being used. It also starts your `ENTRYPOINT` as a subcommand of `/bin/sh -c`, +The shell form of `ENTRYPOINT` ignores any `CMD` or `docker run` command line +arguments. It also starts your `ENTRYPOINT` as a subcommand of `/bin/sh -c`, which does not pass signals. This means that the executable will not be the container's `PID 1`, and will not receive Unix signals. In this case, your executable doesn't receive a `SIGTERM` from `docker stop <container>`. @@ -2024,8 +2119,14 @@ Only the last `ENTRYPOINT` instruction in the Dockerfile will have an effect. ### Exec form ENTRYPOINT example You can use the exec form of `ENTRYPOINT` to set fairly stable default commands -and arguments and then use either form of `CMD` to set additional defaults that -are more likely to be changed. +and arguments and then use `CMD` to set additional defaults that are more +likely to be changed. + +When combining exec form `ENTRYPOINT` with `CMD`, use the exec form of `CMD` +as well. Using the shell form of `CMD` causes it to be wrapped in +`/bin/sh -c`, which means the `ENTRYPOINT` receives a shell invocation as its +argument rather than the bare command and parameters. See +[Understand how CMD and ENTRYPOINT interact](#understand-how-cmd-and-entrypoint-interact). ```dockerfile FROM ubuntu @@ -2329,7 +2430,7 @@ USER <UID>[:<GID>] The `USER` instruction sets the user name (or UID) and optionally the user group (or GID) to use as the default user and group for the remainder of the current stage. 
The specified user is used for `RUN` instructions and at -runtime, runs the relevant `ENTRYPOINT` and `CMD` commands. +runtime runs the relevant `ENTRYPOINT` and `CMD` commands. > Note that when specifying a group for the user, the user will have _only_ the > specified group membership. Any other configured group memberships will be ignored. @@ -2397,9 +2498,15 @@ Therefore, to avoid unintended operations in unknown directories, it's best prac ARG <name>[=<default value>] [<name>[=<default value>]...] ``` -The `ARG` instruction defines a variable that users can pass at build-time to +The `ARG` instruction defines a variable that users can pass at build time to the builder with the `docker build` command using the `--build-arg <varname>=<value>` -flag. +flag. This variable can be used in subsequent instructions such as `FROM`, `ENV`, +`WORKDIR`, and others using the `${VAR}` or `$VAR` template syntax. +It is also passed to all subsequent `RUN` instructions as a build-time +environment variable. + +Unlike `ENV`, an `ARG` variable is not embedded in the image and is not available +in the final container. > [!WARNING] > It isn't recommended to use build arguments for passing secrets such as @@ -2608,15 +2715,16 @@ RUN echo "I'm building for $TARGETPLATFORM" ### BuildKit built-in build args -| Arg | Type | Description | -| ------------------------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `BUILDKIT_CACHE_MOUNT_NS` | String | Set optional cache ID namespace. | -| `BUILDKIT_CONTEXT_KEEP_GIT_DIR` | Bool | Trigger Git context to keep the `.git` directory. | -| `BUILDKIT_INLINE_CACHE`[^2] | Bool | Inline cache metadata to image config or not. | -| `BUILDKIT_MULTI_PLATFORM` | Bool | Opt into deterministic output regardless of multi-platform output or not. | -| `BUILDKIT_SANDBOX_HOSTNAME` | String | Set the hostname (default `buildkitsandbox`) | -| `BUILDKIT_SYNTAX` | String | Set frontend image | -| `SOURCE_DATE_EPOCH` | Int | Set the Unix timestamp for created image and layers. More info from [reproducible builds](https://reproducible-builds.org/docs/source-date-epoch/). Supported since Dockerfile 1.5, BuildKit 0.11 | +| Arg | Type | Description | +|----------------------------------|--------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `BUILDKIT_BUILD_NAME` | String | Override the build name shown in [`buildx history` command](https://docs.docker.com/reference/cli/docker/buildx/history/) and [Docker Desktop Builds view](https://docs.docker.com/desktop/use-desktop/builds/). | +| `BUILDKIT_CACHE_MOUNT_NS` | String | Set optional cache ID namespace. | +| `BUILDKIT_CONTEXT_KEEP_GIT_DIR` | Bool | Trigger Git context to keep the `.git` directory. | +| `BUILDKIT_INLINE_CACHE`[^2] | Bool | Inline cache metadata to image config or not. | +| `BUILDKIT_MULTI_PLATFORM` | Bool | Opt into deterministic output regardless of multi-platform output or not. | +| `BUILDKIT_SANDBOX_HOSTNAME` | String | Set the hostname (default `buildkitsandbox`) | +| `BUILDKIT_SYNTAX` | String | Set frontend image | +| `SOURCE_DATE_EPOCH` | Int | Set the Unix timestamp for created image and layers. More info from [reproducible builds](https://reproducible-builds.org/docs/source-date-epoch/). 
Supported since Dockerfile 1.5, BuildKit 0.11 | #### Example: keep `.git` dir @@ -2782,6 +2890,11 @@ for instance `SIGKILL`, or an unsigned number that matches a position in the kernel's syscall table, for instance `9`. The default is `SIGTERM` if not defined. +`STOPSIGNAL` applies to the signal sent by `docker stop` (and by the Docker +daemon when stopping a container). It does not affect signals sent by keyboard +shortcuts such as Ctrl+C, which sends `SIGINT` directly to the process +regardless of the `STOPSIGNAL` setting. + The image's default stopsignal can be overridden per container, using the `--stop-signal` flag on `docker run` and `docker create`. @@ -2812,9 +2925,12 @@ The options that can appear before `CMD` are: The health check will first run **interval** seconds after the container is started, and then again **interval** seconds after each previous check completes. +During the **start period**, health checks run at **start interval** frequency +instead. If a single run of the check takes longer than **timeout** seconds then the check -is considered to have failed. +is considered to have failed. The process performing the check is abruptly stopped +with a `SIGKILL`. It takes **retries** consecutive failures of the health check for the container to be considered `unhealthy`. diff --git a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/_index.md b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/_index.md index 0938ace3dfe..2060cc6db45 100644 --- a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/_index.md +++ b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/_index.md @@ -100,12 +100,20 @@ To learn more about how to use build checks, see <td>FROM --platform flag should not use a constant value</td> </tr> <tr> - <td><a href="./copy-ignored-file/">CopyIgnoredFile (experimental)</a></td> + <td><a href="./copy-ignored-file/">CopyIgnoredFile</a></td> <td>Attempting to Copy file that is excluded by .dockerignore</td> </tr> <tr> <td><a href="./invalid-definition-description/">InvalidDefinitionDescription (experimental)</a></td> <td>Comment for build stage or argument should follow the format: `# <arg/stage name> <description>`. If this is not intended to be a description comment, add an empty line or comment between the instruction and the comment.</td> </tr> + <tr> + <td><a href="./expose-proto-casing/">ExposeProtoCasing</a></td> + <td>Protocol in EXPOSE instruction should be lowercase</td> + </tr> + <tr> + <td><a href="./expose-invalid-format/">ExposeInvalidFormat</a></td> + <td>IP address and host-port mapping should not be used in EXPOSE instruction. This will become an error in a future release</td> + </tr> </tbody> </table> diff --git a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/copy-ignored-file.md b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/copy-ignored-file.md index 3e8e57e8d4c..535da0be637 100644 --- a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/copy-ignored-file.md +++ b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/copy-ignored-file.md @@ -6,10 +6,6 @@ aliases: - /go/dockerfile/rule/copy-ignored-file/ --- -> [!NOTE] -> This check is experimental and is not enabled by default. To enable it, see -> [Experimental checks](https://docs.docker.com/go/build-checks-experimental/). 
- ## Output ```text diff --git a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/expose-invalid-format.md b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/expose-invalid-format.md new file mode 100644 index 00000000000..9198178f330 --- /dev/null +++ b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/expose-invalid-format.md @@ -0,0 +1,55 @@ +--- +title: ExposeInvalidFormat +description: >- + IP address and host-port mapping should not be used in EXPOSE instruction. This will become an error in a future release +aliases: + - /go/dockerfile/rule/expose-invalid-format/ +--- + +## Output + +```text +EXPOSE instruction should not define an IP address or host-port mapping, found '127.0.0.1:80:80' +``` + +## Description + +The [`EXPOSE`](https://docs.docker.com/reference/dockerfile/#expose) instruction +in a Dockerfile is used to indicate which ports the container listens on at +runtime. It should not include an IP address or host-port mapping, as this is +not the intended use of the `EXPOSE` instruction. Instead, it should only +specify the port number and optionally the protocol (TCP or UDP). + +> [!IMPORTANT] +> This will become an error in a future release. + +## Examples + +❌ Bad: IP address and host-port mapping used. + +```dockerfile +FROM alpine +EXPOSE 127.0.0.1:80:80 +``` + +✅ Good: only the port number is specified. + +```dockerfile +FROM alpine +EXPOSE 80 +``` + +❌ Bad: Host-port mapping used. + +```dockerfile +FROM alpine +EXPOSE 80:80 +``` + +✅ Good: only the port number is specified. + +```dockerfile +FROM alpine +EXPOSE 80 +``` + diff --git a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/expose-proto-casing.md b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/expose-proto-casing.md new file mode 100644 index 00000000000..cfde7fe322e --- /dev/null +++ b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/expose-proto-casing.md @@ -0,0 +1,37 @@ +--- +title: ExposeProtoCasing +description: >- + Protocol in EXPOSE instruction should be lowercase +aliases: + - /go/dockerfile/rule/expose-proto-casing/ +--- + +## Output + +```text +Defined protocol '80/TcP' in EXPOSE instruction should be lowercase +``` + +## Description + +Protocol names in the [`EXPOSE`](https://docs.docker.com/reference/dockerfile/#expose) +instruction should be specified in lowercase to maintain consistency and +readability. This rule checks for protocols that are not in lowercase and +reports them. + +## Examples + +❌ Bad: protocol is not in lowercase. + +```dockerfile +FROM alpine +EXPOSE 80/TcP +``` + +✅ Good: protocol is in lowercase. + +```dockerfile +FROM alpine +EXPOSE 80/tcp +``` + diff --git a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/legacy-key-value-format.md b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/legacy-key-value-format.md index dc43b53cb73..471b0d6ea4b 100644 --- a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/legacy-key-value-format.md +++ b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/legacy-key-value-format.md @@ -55,3 +55,18 @@ ENV DEPS="\ make" ``` +> [!NOTE] +> Be aware of leading whitespace when converting multi-line legacy syntax to +> the modern `key=value` format. In the legacy format, leading whitespace on +> continuation lines is included in the value. In the modern format with +> quoted values, leading whitespace inside the quotes is also preserved. 
If +> you don't want leading whitespace in the value, make sure to remove it when +> rewriting to the new format: +> +> ```dockerfile +> ENV DEPS="\ +> curl \ +> git \ +> make" +> ``` + diff --git a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/secrets-used-in-arg-or-env.md b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/secrets-used-in-arg-or-env.md index db9d1caae67..1184d089eb5 100644 --- a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/secrets-used-in-arg-or-env.md +++ b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/rules/secrets-used-in-arg-or-env.md @@ -28,10 +28,27 @@ See [Build secrets](https://docs.docker.com/build/building/secrets/). ## Examples -❌ Bad: `AWS_SECRET_ACCESS_KEY` is a secret value. +❌ Bad: using ARG to pass AWS credentials. ```dockerfile -FROM scratch +ARG AWS_ACCESS_KEY_ID ARG AWS_SECRET_ACCESS_KEY +RUN aws s3 cp s3://my-bucket/file . +``` + +✅ Good: using secret mounts with environment variables. + +```dockerfile +RUN --mount=type=secret,id=aws_key_id,env=AWS_ACCESS_KEY_ID \ + --mount=type=secret,id=aws_secret_key,env=AWS_SECRET_ACCESS_KEY \ + aws s3 cp s3://my-bucket/file . +``` + +To build with these secrets: + +```console +$ docker buildx build \ + --secret id=aws_key_id,env=AWS_ACCESS_KEY_ID \ + --secret id=aws_secret_key,env=AWS_SECRET_ACCESS_KEY . ``` diff --git a/_vendor/github.com/moby/moby/api/docs/CHANGELOG.md b/_vendor/github.com/moby/moby/api/docs/CHANGELOG.md new file mode 100644 index 00000000000..b8ee6ede658 --- /dev/null +++ b/_vendor/github.com/moby/moby/api/docs/CHANGELOG.md @@ -0,0 +1,1069 @@ +--- +title: "Engine API version history" +description: "Documentation of changes that have been made to Engine API." +keywords: "API, Docker, rcli, REST, documentation" +--- + +<!-- This file is maintained within the moby/moby GitHub + repository at https://github.com/moby/moby/. Make all + pull requests against that repo. If you see this file in + another repository, consider it read-only there, as it will + periodically be overwritten by the definitive file. Pull + requests which include edits to this file in other repositories + will be rejected. +--> + +## v1.54 API changes + +* `GET /images/json` now supports an `identity` query parameter. When set, + the response includes manifest summaries and may include an `Identity` field + for each manifest with trusted identity and origin information. +* `POST /networks/{id}/connect` now correctly applies the `MacAddress` field in + `EndpointSettings`. This field was added in API v1.44, but was previously ignored. + +## v1.53 API changes + +* `GET /info` now includes an `NRI` field. If the Node Resource Interface (NRI) + is enabled, this field contains information describing it. +* `GET /events` now also supports [`application/jsonl`](https://jsonlines.org/) + when negotiating content-type. +* `GET /images/{name}/json` now includes an `Identity` field with trusted + identity and origin information for the image. +* Deprecated: The `POST /grpc` and `POST /session` endpoints are deprecated and + will be removed in a future version. + +## v1.52 API changes + +* `GET /images/{name}/get` now accepts multiple `platform` query-arguments + to allow selecting which platform(s) of a multi-platform image must be + saved. +* `POST /images/load` now accepts multiple `platform` query-arguments + to allow selecting which platform(s) of a multi-platform image to load. +* `GET /events` no longer includes the deprecated `status`, `id`, and `from` + fields. 
These fields were removed in API v1.22, but were still included + in the response. +* `GET /networks/{id}` now includes a `Status` field, providing statistics + about IPAM allocations for the subnets assigned to the network. +* Deprecated: the Engine was automatically backfilling empty `PortBindings` lists with + a PortBinding with an empty HostIP and HostPort when calling `POST /containers/{id}/start`. + This behavior is now deprecated, and a warning is returned by `POST /containers/create`. + A future API version will drop empty `PortBindings` lists altogether. +* `GET /images/{name}/json` now omits the following `Config` fields when + not set, to closer align with the implementation of the [OCI Image Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/specs-go/v1/config.go#L23-L62): + `Cmd`, `Entrypoint`, `Env`, `Labels`, `OnBuild`, `User`, `Volumes`, and `WorkingDir`. +* `GET /images/{name}/json` now omits the following fields if their value + is empty: `Parent`, `Comment`, `DockerVersion`, `Author`. The `Parent` + and `DockerVersion` fields were set by the legacy builder, and are no + longer set when using BuildKit. The `Author` field is set through the + `MAINTAINER` Dockerfile instruction, which is deprecated, and the `Comment` + field is optional, and may not be set depending on how the image was created. +* `GET /containers/{id}/json` now omits `Config.OnBuild` if its value is empty. +* `GET /containers/{id}/json`: the `NetworkSettings` no longer returns the deprecated + `Bridge`, `HairpinMode`, `LinkLocalIPv6Address`, `LinkLocalIPv6PrefixLen`, + `SecondaryIPAddresses`, `SecondaryIPv6Addresses`, `EndpointID`, `Gateway`, + `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, + `IPv6Gateway`, and `MacAddress` fields. These fields were deprecated in + API v1.21 (docker v1.9.0) but kept around for backward compatibility. +* Removed the `KernelMemoryTCP` field from the `POST /containers/{id}/update` and + `GET /containers/{id}/json` endpoints; any value it is set to will be ignored + on API version `v1.52` and up. Older API versions still accept this field, but + may take no effect, depending on the kernel version and OCI runtime in use. +* Removed the `KernelMemoryTCP` field from the `GET /info` endpoint. +* `GET /events` supports content-type negotiation and can produce either `application/x-ndjson` + (Newline delimited JSON object stream) or `application/json-seq` (RFC7464). +* `POST /containers/create` no longer supports configuring a container-wide MAC address + via the container's `Config.MacAddress` field. A container's MAC address can now only + be configured via endpoint settings when connecting to a network. +* `GET /services` now returns `SwapBytes` and `MemorySwappiness` fields as part + of the `Resource` requirements. +* `GET /services/{id}` now returns `SwapBytes` and `MemorySwappiness` fields as + part of the `Resource` requirements. +* `POST /services/create` now accepts `SwapBytes` and `MemorySwappiness` fields + as part of the `Resource` requirements. +* `POST /services/{id}/update` now accepts `SwapBytes` and `MemorySwappiness` + fields as part of the `Resource` requirements. +* `GET /tasks` now returns `SwapBytes` and `MemorySwappiness` fields as part + of the `Resource` requirements. +* `GET /tasks/{id}` now returns `SwapBytes` and `MemorySwappiness` fields as + part of the `Resource` requirements. +* `GET /containers/{id}/stats` now returns an `os_type` field to allow platform- + specific handling of the stats.
+* `GET /system/df` returns `ImagesUsage`, `ContainersUsage`, `VolumesUsage`, and + `BuildCacheUsage` fields with brief system disk usage data for each system object type. + The endpoint supports the `?verbose=1` query to return verbose system disk usage information. +* Deprecated: `GET /system/df` response fields `LayersSize`, `Images`, `Containers`, + `Volumes`, and `BuildCache` have been removed in favor of the-type specific usage fields. + API v1.52 returns both the legacy and current fields to help existing integrations + to transition to the new response. The legacy fields are not populated if the + `verbose` query parameter is used. Starting with API v1.53, the legacy fields + will no longer be returned. + +## v1.51 API changes + +* `GET /images/json` now sets the value of `Containers` field for all images + to the count of containers using the image. + This field was previously always -1. +* Deprecated: The field `NetworkSettings.Bridge` returned by `GET /containers/{id}/json` + is deprecated and will be removed in the next API version. +* Deprecated: The field `KernelMemoryTCP` as part of `POST /containers/{id}/update` + and returned by `GET /containers/{id}/json` is deprecated and will be removed + in the next API version. +* Deprecated: The field `KernelMemoryTCP` as part of `GET /info` is deprecated + and will be removed in the next API version. +* Deprecated: the `Parent` and `DockerVersion` fields returned by the + `GET /images/{name}/json` endpoint are deprecated. These fields are set by + the legacy builder, and are no longer set when using BuildKit. The API + continues returning these fields when set for informational purposes, but + they should not be depended on as they will be omitted once the legacy builder + is removed. +* Deprecated: the `Config.DockerVersion` field returned by the `GET /plugins` + and `GET /images/{name}/json` endpoints is deprecated. The field is no + longer set, and is omitted when empty. + +## v1.50 API changes + +* `GET /info` now includes a `DiscoveredDevices` field. This is an array of + `DeviceInfo` objects, each providing details about a device discovered by a + device driver. + Currently only the CDI device driver is supported. +* `DELETE /images/{name}` now supports a `platforms` query parameter. It accepts + an array of JSON-encoded OCI Platform objects, allowing for selecting specific + platforms to delete content for. +* Deprecated: The `BridgeNfIptables` and `BridgeNfIp6tables` fields in the + `GET /info` response were deprecated in API v1.48, and are now omitted + in API v1.50. +* Deprecated: `GET /images/{name}/json` no longer returns the following `Config` + fields; `Hostname`, `Domainname`, `AttachStdin`, `AttachStdout`, `AttachStderr` + `Tty`, `OpenStdin`, `StdinOnce`, `Image`, `NetworkDisabled` (already omitted unless set), + `MacAddress` (already omitted unless set), `StopTimeout` (already omitted unless set). + These additional fields were included in the response due to an implementation + detail but not part of the image's Configuration. These fields were marked + deprecated in API v1.46, and are now omitted. Older versions of the API still + return these fields, but they are always empty. + +## v1.49 API changes + +* `GET /images/{name}/json` now supports a `platform` parameter (JSON + encoded OCI Platform type) allowing to specify a platform of the multi-platform + image to inspect. + This option is mutually exclusive with the `manifests` option. 
+* `GET /info` now returns a `FirewallBackend` containing information about + the daemon's firewalling configuration. +* Deprecated: The `AllowNondistributableArtifactsCIDRs` and `AllowNondistributableArtifactsHostnames` + fields in the `RegistryConfig` struct in the `GET /info` response are omitted + in API v1.49. +* Deprecated: The `ContainerdCommit.Expected`, `RuncCommit.Expected`, and + `InitCommit.Expected` fields in the `GET /info` endpoint were deprecated + in API v1.48, and are now omitted in API v1.49. + +## v1.48 API changes + +* Deprecated: The "error" and "progress" fields in streaming responses for + endpoints that return a JSON progress response, such as `POST /images/create`, + `POST /images/{name}/push`, and `POST /build` are deprecated. These fields + were marked deprecated in API v1.4 (docker v0.6.0) and API v1.8 (docker v0.7.1) + respectively, but still returned. These fields will be left empty or will + be omitted in a future API version. Users should use the information in the + `errorDetail` and `progressDetail` fields instead. +* Deprecated: The "allow-nondistributable-artifacts" daemon configuration is + deprecated and enabled by default. The `AllowNondistributableArtifactsCIDRs` + and `AllowNondistributableArtifactsHostnames` fields in the `RegistryConfig` + struct in the `GET /info` response will now always be `null` and will be + omitted in API v1.49. +* Deprecated: The `BridgeNfIptables` and `BridgeNfIp6tables` fields in the + `GET /info` response are now always be `false` and will be omitted in API + v1.49. The netfilter module is now loaded on-demand, and no longer during + daemon startup, making these fields obsolete. +* Deprecated: The `POST /build/prune` `keep-storage` query parameter has been + renamed to `reserved-space`. `keep-storage` support will be removed in API v1.52. +* `GET /images/{name}/history` now supports a `platform` parameter (JSON + encoded OCI Platform type) that allows to specify a platform to show the + history of. +* `POST /images/{name}/load` and `GET /images/{name}/get` now support a + `platform` parameter (JSON encoded OCI Platform type) that allows to specify + a platform to load/save. Not passing this parameter will result in + loading/saving the full multi-platform image. +* `POST /containers/create` now includes a warning in the response when setting + the container-wide `Config.VolumeDriver` option in combination with volumes + defined through `Mounts` because the `VolumeDriver` option has no effect on + those volumes. This warning was previously generated by the CLI, but now + moved to the daemon so that other clients can also get this warning. +* `POST /containers/create` now supports `Mount` of type `image` for mounting + an image inside a container. +* Deprecated: The `ContainerdCommit.Expected`, `RuncCommit.Expected`, and + `InitCommit.Expected` fields in the `GET /info` endpoint are deprecated + and will be omitted in API v1.49. +* `Sysctls` in `HostConfig` (top level `--sysctl` settings) for `eth0` are + no longer migrated to `DriverOpts`, as described in the changes for v1.46. +* `GET /images/json` and `GET /images/{name}/json` responses now include + `Descriptor` field, which contains an OCI descriptor of the image target. + The new field will only be populated if the daemon provides a multi-platform + image store. + WARNING: This is experimental and may change at any time without any backward + compatibility. 
+* `GET /images/{name}/json` response now will return the `Manifests` field + containing information about the sub-manifests contained in the image index. + This includes things like platform-specific manifests and build attestations. + The new field will only be populated if the request also sets the `manifests` + query parameter to `true`. + This acts the same as in the `GET /images/json` endpoint. + WARNING: This is experimental and may change at any time without any backward compatibility. +* `GET /containers/{name}/json` now returns an `ImageManifestDescriptor` field + containing the OCI descriptor of the platform-specific image manifest of the + image that was used to create the container. + This field is only populated if the daemon provides a multi-platform image + store. +* `POST /networks/create` now has an `EnableIPv4` field. Setting it to `false` + disables IPv4 IPAM for the network. It can only be set to `false` if the + daemon has experimental features enabled. +* `GET /networks/{id}` now returns an `EnableIPv4` field showing whether the + network has IPv4 IPAM enabled. +* `POST /networks/{id}/connect` and `POST /containers/create` now accept a + `GwPriority` field in `EndpointsConfig`. This value is used to determine which + network endpoint provides the default gateway for the container. The endpoint + with the highest priority is selected. If multiple endpoints have the same + priority, endpoints are sorted lexicographically by their network name, and + the one that sorts first is picked. +* `GET /containers/json` now returns a `GwPriority` field in `NetworkSettings` + for each network endpoint. +* API debug endpoints (`GET /debug/vars`, `GET /debug/pprof/`, `GET /debug/pprof/cmdline`, + `GET /debug/pprof/profile`, `GET /debug/pprof/symbol`, `GET /debug/pprof/trace`, + `GET /debug/pprof/{name}`) are now also accessible through the versioned-API + paths (`/v<API-version>/<endpoint>`). +* `POST /build/prune` renames `keep-storage` to `reserved-space` and now supports + additional prune parameters `max-used-space` and `min-free-space`. +* `GET /containers/json` now returns an `ImageManifestDescriptor` field + matching the same field in `/containers/{name}/json`. + This field is only populated if the daemon provides a multi-platform image + store. + +## v1.47 API changes + +* `GET /images/json` response now includes `Manifests` field, which contains + information about the sub-manifests included in the image index. This + includes things like platform-specific manifests and build attestations. + The new field will only be populated if the request also sets the `manifests` + query parameter to `true`. + WARNING: This is experimental and may change at any time without any backward + compatibility. +* `GET /info` no longer includes warnings when `bridge-nf-call-iptables` or + `bridge-nf-call-ip6tables` are disabled when the daemon was started. The + `br_netfilter` module is now attempted to be loaded when needed, making those + warnings inaccurate. This change is not versioned, and affects all API versions + if the daemon has this patch. + +## v1.46 API changes + +* `GET /info` now includes a `Containerd` field containing information about + the location of the containerd API socket and containerd namespaces used + by the daemon to run containers and plugins. 
+* `POST /containers/create` field `NetworkingConfig.EndpointsConfig.DriverOpts`, + and `POST /networks/{id}/connect` field `EndpointsConfig.DriverOpts`, now + support label `com.docker.network.endpoint.sysctls` for setting per-interface + sysctls. The value is a comma separated list of sysctl assignments, the + interface name must be "IFNAME". For example, to set + `net.ipv4.config.eth0.log_martians=1`, use + `net.ipv4.config.IFNAME.log_martians=1`. In API versions up-to 1.46, top level + `--sysctl` settings for `eth0` will be migrated to `DriverOpts` when possible. + This automatic migration will be removed in a future release. +* `GET /containers/json` now returns the annotations of containers. +* `POST /images/{name}/push` now supports a `platform` parameter (JSON encoded + OCI Platform type) that allows selecting a specific platform manifest from + the multi-platform image. +* `POST /containers/create` now takes `Options` as part of `HostConfig.Mounts.TmpfsOptions` to set options for tmpfs mounts. +* `POST /services/create` now takes `Options` as part of `ContainerSpec.Mounts.TmpfsOptions`, to set options for tmpfs mounts. +* `GET /events` now supports image `create` event that is emitted when a new + image is built regardless if it was tagged or not. + +### Deprecated Config fields in `GET /images/{name}/json` response + +The `Config` field returned by this endpoint (used for "image inspect") returns +additional fields that are not part of the image's configuration and not part of +the [Docker Image Spec] and the [OCI Image Spec]. + +These additional fields are included in the response, due to an +implementation detail, where the [api/types.ImageInspec] type used +for the response is using the [container.Config] type. + +The [container.Config] type is a superset of the image config, and while the +image's Config is used as a _template_ for containers created from the image, +the additional fields are set at runtime (from options passed when creating +the container) and not taken from the image Config. + +These fields are never set (and always return the default value for the type), +but are not omitted in the response when left empty. As these fields were not +intended to be part of the image configuration response, they are deprecated, +and will be removed from the API. + +The following fields are currently included in the API response, but +are not part of the underlying image's Config, and deprecated: + +- `Hostname` +- `Domainname` +- `AttachStdin` +- `AttachStdout` +- `AttachStderr` +- `Tty` +- `OpenStdin` +- `StdinOnce` +- `Image` +- `NetworkDisabled` (already omitted unless set) +- `MacAddress` (already omitted unless set) +- `StopTimeout` (already omitted unless set) + +[Docker image spec]: https://github.com/moby/docker-image-spec/blob/v1.3.1/specs-go/v1/image.go#L19-L32 +[OCI Image Spec]: https://github.com/opencontainers/image-spec/blob/v1.1.0/specs-go/v1/config.go#L24-L62 +[api/types.ImageInspec]: https://github.com/moby/moby/blob/v26.1.4/api/types/types.go#L87-L104 +[container.Config]: https://github.com/moby/moby/blob/v26.1.4/api/types/container/config.go#L47-L82 + +* `POST /services/create` and `POST /services/{id}/update` now support OomScoreAdj + +## v1.45 API changes + +* `POST /containers/create` now supports `VolumeOptions.Subpath` which allows a + subpath of a named volume to be mounted. +* `POST /images/search` will always assume a `false` value for the `is-automated` + field. 
Consequently, searching for `is-automated=true` will yield no results, + while `is-automated=false` will be a no-op. +* `GET /images/{name}/json` no longer includes the `Container` and + `ContainerConfig` fields. To access image configuration, use `Config` field + instead. +* The `Aliases` field returned in calls to `GET /containers/{name:.*}/json` no + longer contains the short container ID, but instead will reflect exactly the + values originally submitted to the `POST /containers/create` endpoint. The + newly introduced `DNSNames` should now be used instead when short container + IDs are needed. + +## v1.44 API changes + +* GET `/images/json` now accepts an `until` filter. This accepts a timestamp and + lists all images created before it. The `<timestamp>` can be Unix timestamps, + date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) + computed relative to the daemon machine’s time. This change is not versioned, + and affects all API versions if the daemon has this patch. +* The `VirtualSize` field in the `GET /images/{name}/json`, `GET /images/json`, + and `GET /system/df` responses is now omitted. Use the `Size` field instead, + which contains the same information. +* Deprecated: The `is_automated` field in the `GET /images/search` response has + been deprecated and will always be set to false in the future because Docker + Hub is deprecating the `is_automated` field in its search API. The deprecation + is not versioned, and applies to all API versions. +* Deprecated: The `is-automated` filter for the `GET /images/search` endpoint. + The `is_automated` field has been deprecated by Docker Hub's search API. + Consequently, searching for `is-automated=true` will yield no results. The + deprecation is not versioned, and applies to all API versions. +* Read-only bind mounts are now made recursively read-only on kernel >= 5.12 + with runtimes which support the feature. + `POST /containers/create`, `GET /containers/{id}/json`, and `GET /containers/json` now supports + `BindOptions.ReadOnlyNonRecursive` and `BindOptions.ReadOnlyForceRecursive` to customize the behavior. +* `POST /containers/create` now accepts a `HealthConfig.StartInterval` to set the + interval for health checks during the start period. +* `GET /info` now includes a `CDISpecDirs` field indicating the configured CDI + specifications directories. The use of the applied setting requires the daemon + to have experimental enabled, and for non-experimental daemons an empty list is + always returned. +* `POST /networks/create` now returns a 400 if the `IPAMConfig` has invalid + values. Note that this change is _unversioned_ and applied to all API + versions on daemon that support version 1.44. +* `POST /networks/create` with a duplicated name now fails systematically. As + such, the `CheckDuplicate` field is now deprecated. Note that this change is + _unversioned_ and applied to all API versions on daemon that support version + 1.44. +* `POST /containers/create` now accepts multiple `EndpointSettings` in + `NetworkingConfig.EndpointSettings`. +* `POST /containers/create` and `POST /networks/{id}/connect` will now catch + validation errors that were previously only returned during `POST /containers/{id}/start`. + These endpoints will also return the full set of validation errors they find, + instead of returning only the first one. + Note that this change is _unversioned_ and applies to all API versions. 
+* `POST /services/create` and `POST /services/{id}/update` now accept `Seccomp` + and `AppArmor` fields in the `ContainerSpec.Privileges` object. This allows + some configuration of Seccomp and AppArmor in Swarm services. +* A new endpoint-specific `MacAddress` field has been added to `NetworkSettings.EndpointSettings` + on `POST /containers/create`, and to `EndpointConfig` on `POST /networks/{id}/connect`. + The container-wide `MacAddress` field in `Config`, on `POST /containers/create`, is now deprecated. +* The field `Networks` in the `POST /services/create` and `POST /services/{id}/update` + requests is now deprecated. You should instead use the field `TaskTemplate.Networks`. +* The `Container` and `ContainerConfig` fields in the `GET /images/{name}/json` + response are deprecated and will no longer be included in API v1.45. +* `GET /info` now includes `status` properties in `Runtimes`. +* A new field named `DNSNames` and containing all non-fully qualified DNS names + a container takes on a specific network has been added to `GET /containers/{name:.*}/json`. +* The `Aliases` field returned in calls to `GET /containers/{name:.*}/json` in v1.44 and older + versions contains the short container ID. This will change in the next API version, v1.45. + Starting with that API version, this specific value will be removed from the `Aliases` field + such that this field will reflect exactly the values originally submitted to the + `POST /containers/create` endpoint. The newly introduced `DNSNames` should now be used instead. +* The fields `HairpinMode`, `LinkLocalIPv6Address`, `LinkLocalIPv6PrefixLen`, `SecondaryIPAddresses`, + `SecondaryIPv6Addresses` available in `NetworkSettings` when calling `GET /containers/{id}/json` are + deprecated and will be removed in a future release. You should instead look for the default network in + `NetworkSettings.Networks`. +* `GET /images/{id}/json` omits the `Created` field (previously it was `0001-01-01T00:00:00Z`) + if the `Created` field is missing from the image config. + +## v1.43 API changes + +* `POST /containers/create` now accepts `Annotations` as part of `HostConfig`. + Can be used to attach arbitrary metadata to the container, which will also be + passed to the runtime when the container is started. +* `GET /images/json` no longer includes hardcoded `<none>:<none>` and + `<none>@<none>` in `RepoTags` and`RepoDigests` for untagged images. + In such cases, empty arrays will be produced instead. +* The `VirtualSize` field in the `GET /images/{name}/json`, `GET /images/json`, + and `GET /system/df` responses is deprecated and will no longer be included + in API v1.44. Use the `Size` field instead, which contains the same information. +* `GET /info` now includes `no-new-privileges` in the `SecurityOptions` string + list when this option is enabled globally. This change is not versioned, and + affects all API versions if the daemon has this patch. + +## v1.42 API changes + +* Removed the `BuilderSize` field on the `GET /system/df` endpoint. This field + was introduced in API 1.31 as part of an experimental feature, and no longer + used since API 1.40. + Use field `BuildCache` instead to track storage used by the builder component. +* `POST /containers/{id}/stop` and `POST /containers/{id}/restart` now accept a + `signal` query parameter, which allows overriding the container's default stop- + signal. +* `GET /images/json` now accepts query parameter `shared-size`. 
When set `true`, + images returned will include `SharedSize`, which provides the size on disk shared + with other images present on the system. +* `GET /system/df` now accepts query parameter `type`. When set, + computes and returns data only for the specified object type. + The parameter can be specified multiple times to select several object types. + Supported values are: `container`, `image`, `volume`, `build-cache`. +* `GET /system/df` can now be used concurrently. If a request is made while a + previous request is still being processed, the request will receive the result + of the already running calculation, once completed. Previously, an error + (`a disk usage operation is already running`) would be returned in this + situation. This change is not versioned, and affects all API versions if the + daemon has this patch. +* The `POST /images/create` now supports both the operating system and architecture + that is passed through the `platform` query parameter when using the `fromSrc` + option to import an image from an archive. Previously, only the operating system + was used and the architecture was ignored. If no `platform` option is set, the + host's operating system and architecture as used as default. This change is not + versioned, and affects all API versions if the daemon has this patch. +* The `POST /containers/{id}/wait` endpoint now returns a `400` status code if an + invalid `condition` is provided (on API 1.30 and up). +* Removed the `KernelMemory` field from the `POST /containers/create` and + `POST /containers/{id}/update` endpoints, any value it is set to will be ignored + on API version `v1.42` and up. Older API versions still accept this field, but + may take no effect, depending on the kernel version and OCI runtime in use. +* `GET /containers/{id}/json` now omits the `KernelMemory` and `KernelMemoryTCP` + if they are not set. +* `GET /info` now omits the `KernelMemory` and `KernelMemoryTCP` if they are not + supported by the host or host's configuration (if cgroups v2 are in use). +* `GET /_ping` and `HEAD /_ping` now return `Builder-Version` by default. + This header contains the default builder to use, and is a recommendation as + advertised by the daemon. However, it is up to the client to choose which builder + to use. + + The default value on Linux is version "2" (BuildKit), but the daemon can be + configured to recommend version "1" (classic Builder). Windows does not yet + support BuildKit for native Windows images, and uses "1" (classic builder) as + a default. + + This change is not versioned, and affects all API versions if the daemon has + this patch. +* `GET /_ping` and `HEAD /_ping` now return a `Swarm` header, which allows a + client to detect if Swarm is enabled on the daemon, without having to call + additional endpoints. + This change is not versioned, and affects all API versions if the daemon has + this patch. Clients must consider this header "optional", and fall back to + using other endpoints to get this information if the header is not present. + + The `Swarm` header can contain one of the following values: + + - "inactive" + - "pending" + - "error" + - "locked" + - "active/worker" + - "active/manager" +* `POST /containers/create` for Windows containers now accepts a new syntax in + `HostConfig.Resources.Devices.PathOnHost`. As well as the existing `class/<GUID>` + syntax, `<IDType>://<ID>` is now recognised. Support for specific `<IDType>` values + depends on the underlying implementation and Windows version. 
This change is not + versioned, and affects all API versions if the daemon has this patch. +* `GET /containers/{id}/attach`, `GET /exec/{id}/start`, `GET /containers/{id}/logs`, + `GET /services/{id}/logs` and `GET /tasks/{id}/logs` now set the Content-Type header + to `application/vnd.docker.multiplexed-stream` when a multiplexed stdout/stderr + stream is sent to the client, and `application/vnd.docker.raw-stream` otherwise. +* `POST /volumes/create` now accepts a new `ClusterVolumeSpec` to create a cluster + volume (CNI). This option can only be used if the daemon is a Swarm manager. + The Volume response on creation can now also contain a `ClusterVolume` field + with information about the created volume. +* The `BuildCache.Parent` field, as returned by `GET /system/df`, is deprecated + and is now omitted. API versions before v1.42 continue to include this field. +* `GET /system/df` now includes a new `Parents` field, for "build-cache" records, + which contains a list of parent IDs for the build-cache record. +* Volume information returned by `GET /volumes/{name}`, `GET /volumes` and + `GET /system/df` can now contain a `ClusterVolume` if the volume is a cluster + volume (requires the daemon to be a Swarm manager). +* The `Volume` type, as returned by `GET /volumes/{name}`, `GET /volumes`, and + `GET /system/df`, now includes the new `ClusterVolume` fields. +* Added a new `PUT /volumes/{name}` endpoint to update cluster volumes (CNI). + Cluster volumes are only supported if the daemon is a Swarm manager. +* `GET /containers/{name}/attach/ws` endpoint now accepts `stdin`, `stdout` and + `stderr` query parameters to only attach to configured streams. + + NOTE: These parameters were documented before in older API versions, but not + actually supported. API versions before v1.42 continue to ignore these parameters + and default to attaching to all streams. To preserve the pre-v1.42 behavior, + set all three query parameters (`?stdin=1,stdout=1,stderr=1`). +* `POST /containers/create` on Linux now respects the `HostConfig.ConsoleSize` property. + The container is immediately created with the desired terminal size and clients no longer + need to set the desired size on their own. +* `POST /containers/create` now allows setting `CreateMountpoint` for a host path to be + created if missing. This brings parity with `Binds`. +* `POST /containers/create` rejects the request if BindOptions|VolumeOptions|TmpfsOptions + is set with a non-matching mount Type. +* `POST /containers/{id}/exec` now accepts an optional `ConsoleSize` parameter. + It allows setting the console size of the executed process immediately when it's created. +* `POST /volumes/prune` will now only prune "anonymous" volumes (volumes which were not given a name) by default. A new filter parameter `all` can be set to a truth-y value (`true`, `1`) to get the old behavior. + +## v1.41 API changes + +* `GET /events` now returns `prune` events after pruning resources have completed. + Prune events are returned for `container`, `network`, `volume`, `image`, and + `builder`, and have a `reclaimed` attribute, indicating the amount of space + reclaimed (in bytes). +* `GET /info` now returns a `CgroupVersion` field, containing the cgroup version. +* `GET /info` now returns a `DefaultAddressPools` field, containing a list of + custom default address pools for local networks, which can be specified in the + `daemon.json` file or `--default-address-pool` dockerd option. +* `POST /services/create` and `POST /services/{id}/update` now support `BindOptions.NonRecursive`.
+* The `ClusterStore` and `ClusterAdvertise` fields in `GET /info` are deprecated + and are now omitted if they contain an empty value. This change is not versioned, + and affects all API versions if the daemon has this patch. +* The `filter` (singular) query parameter, which was deprecated in favor of the + `filters` option in Docker 1.13, has now been removed from the `GET /images/json` + endpoint. The parameter remains available when using API version 1.40 or below. +* `GET /services` now returns `CapAdd` and `CapDrop` as part of the `ContainerSpec`. +* `GET /services/{id}` now returns `CapAdd` and `CapDrop` as part of the `ContainerSpec`. +* `POST /services/create` now accepts `CapAdd` and `CapDrop` as part of the `ContainerSpec`. +* `POST /services/{id}/update` now accepts `CapAdd` and `CapDrop` as part of the `ContainerSpec`. +* `GET /tasks` now returns `CapAdd` and `CapDrop` as part of the `ContainerSpec`. +* `GET /tasks/{id}` now returns `CapAdd` and `CapDrop` as part of the `ContainerSpec`. +* `GET /services` now returns `Pids` in `TaskTemplate.Resources.Limits`. +* `GET /services/{id}` now returns `Pids` in `TaskTemplate.Resources.Limits`. +* `POST /services/create` now accepts `Pids` in `TaskTemplate.Resources.Limits`. +* `POST /services/{id}/update` now accepts `Pids` in `TaskTemplate.Resources.Limits` + to limit the maximum number of PIDs. +* `GET /tasks` now returns `Pids` in `TaskTemplate.Resources.Limits`. +* `GET /tasks/{id}` now returns `Pids` in `TaskTemplate.Resources.Limits`. +* `POST /containers/create` now accepts a `platform` query parameter in the format + `os[/arch[/variant]]`. + + When set, the daemon checks if the requested image is present in the local image + cache with the given OS and Architecture, and otherwise returns a `404` status. + + If the option is _not_ set, the host's native OS and Architecture are used to + look up the image in the image cache. However, if no platform is passed and the + given image _does_ exist in the local image cache, but its OS or architecture + do not match, the container is created with the available image, and a warning + is added to the `Warnings` field in the response, for example; + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + +* `POST /containers/create` on Linux now accepts the `HostConfig.CgroupnsMode` property. + Set the property to `host` to create the container in the daemon's cgroup namespace, or + `private` to create the container in its own private cgroup namespace. The per-daemon + default is `host`, and can be changed by using the`CgroupNamespaceMode` daemon configuration + parameter. +* `GET /info` now returns an `OSVersion` field, containing the operating system's + version. This change is not versioned, and affects all API versions if the daemon + has this patch. +* `GET /info` no longer returns the `SystemStatus` field if it does not have a + value set. This change is not versioned, and affects all API versions if the + daemon has this patch. +* `GET /services` now accepts query parameter `status`. When set `true`, + services returned will include `ServiceStatus`, which provides Desired, + Running, and Completed task counts for the service. +* `GET /services` may now include `ReplicatedJob` or `GlobalJob` as the `Mode` + in a `ServiceSpec`. +* `GET /services/{id}` may now include `ReplicatedJob` or `GlobalJob` as the + `Mode` in a `ServiceSpec`. 
+* `POST /services/create` now accepts `ReplicatedJob` or `GlobalJob` as the `Mode` + in the `ServiceSpec`. +* `POST /services/{id}/update` accepts updating the fields of the + `ReplicatedJob` object in the `ServiceSpec.Mode`. The service mode still + cannot be changed, however. +* `GET /services` now includes `JobStatus` on Services with mode + `ReplicatedJob` or `GlobalJob`. +* `GET /services/{id}` now includes `JobStatus` on Services with mode + `ReplicatedJob` or `GlobalJob`. +* `GET /tasks` now includes `JobIteration` on Tasks spawned from a job-mode + service. +* `GET /tasks/{id}` now includes `JobIteration` on the task if spawned from a + job-mode service. +* `GET /containers/{id}/stats` now accepts a query param (`one-shot`) which, when used with `stream=false`, fetches a + single set of stats instead of waiting for two collection cycles to have 2 CPU stats over a 1 second period. +* The `KernelMemory` field in `HostConfig.Resources` is now deprecated. +* The `KernelMemory` field in `Info` is now deprecated. +* `GET /services` now returns `Ulimits` as part of `ContainerSpec`. +* `GET /services/{id}` now returns `Ulimits` as part of `ContainerSpec`. +* `POST /services/create` now accepts `Ulimits` as part of `ContainerSpec`. +* `POST /services/{id}/update` now accepts `Ulimits` as part of `ContainerSpec`. + +## v1.40 API changes + +* The `/_ping` endpoint can now be accessed using either `GET` or `HEAD` requests. + When accessed using a `HEAD` request, all headers are returned, but the body + is empty (`Content-Length: 0`). This change is not versioned, and affects all + API versions if the daemon has this patch. Clients are recommended to try + using `HEAD`, but fall back to `GET` if the `HEAD` request fails. +* `GET /_ping` and `HEAD /_ping` now set `Cache-Control` and `Pragma` headers to + prevent the result from being cached. This change is not versioned, and affects + all API versions if the daemon has this patch. +* `GET /services` now returns `Sysctls` as part of the `ContainerSpec`. +* `GET /services/{id}` now returns `Sysctls` as part of the `ContainerSpec`. +* `POST /services/create` now accepts `Sysctls` as part of the `ContainerSpec`. +* `POST /services/{id}/update` now accepts `Sysctls` as part of the `ContainerSpec`. +* `POST /services/create` now accepts `Config` as part of `ContainerSpec.Privileges.CredentialSpec`. +* `POST /services/{id}/update` now accepts `Config` as part of `ContainerSpec.Privileges.CredentialSpec`. +* `POST /services/create` now includes `Runtime` as an option in `ContainerSpec.Configs`. +* `POST /services/{id}/update` now includes `Runtime` as an option in `ContainerSpec.Configs`. +* `GET /tasks` now returns `Sysctls` as part of the `ContainerSpec`. +* `GET /tasks/{id}` now returns `Sysctls` as part of the `ContainerSpec`. +* `GET /networks` now supports a `dangling` filter type. When set to `true` (or + `1`), the endpoint returns all networks that are not in use by a container. When + set to `false` (or `0`), only networks that are in use by one or more containers + are returned. +* `GET /nodes` now supports a `node.label` filter type to filter nodes based + on node labels. The format of the label filter is `node.label=<key>`/`node.label=<key>=<value>` + to return those with the specified labels, or `node.label!=<key>`/`node.label!=<key>=<value>` + to return those without the specified labels. +* `POST /containers/create` now accepts a `fluentd-async` option in `HostConfig.LogConfig.Config` + when using the Fluentd logging driver.
This option deprecates the `fluentd-async-connect` + option, which remains functional, but will be removed in a future release. Users + are encouraged to use the `fluentd-async` option going forward. This change is + not versioned, and affects all API versions if the daemon has this patch. +* `POST /containers/create` now accepts a `fluentd-request-ack` option in + `HostConfig.LogConfig.Config` when using the Fluentd logging driver. If enabled, + the Fluentd logging driver sends the chunk option with a unique ID. The server + will respond with an acknowledgement. This option improves the reliability of + the message transmission. This change is not versioned, and affects all API + versions if the daemon has this patch. +* `POST /containers/create`, `GET /containers/{id}/json`, and `GET /containers/json` now supports + `BindOptions.NonRecursive`. +* `POST /swarm/init` now accepts a `DataPathPort` property to set data path port number. +* `GET /info` now returns information about `DataPathPort` that is currently used in swarm +* `GET /info` now returns `PidsLimit` boolean to indicate if the host kernel has + PID limit support enabled. +* `GET /info` now includes `name=rootless` in `SecurityOptions` when the daemon is running in + rootless mode. This change is not versioned, and affects all API versions if the daemon has + this patch. +* `GET /info` now returns `none` as `CgroupDriver` when the daemon is running in rootless mode. + This change is not versioned, and affects all API versions if the daemon has this patch. +* `POST /containers/create` now accepts `DeviceRequests` as part of `HostConfig`. + Can be used to set Nvidia GPUs. +* `GET /swarm` endpoint now returns DataPathPort info +* `POST /containers/create` now takes `KernelMemoryTCP` field to set hard limit for kernel TCP buffer memory. +* `GET /service` now returns `MaxReplicas` as part of the `Placement`. +* `GET /service/{id}` now returns `MaxReplicas` as part of the `Placement`. +* `POST /service/create` and `POST /services/(id or name)/update` now take the field `MaxReplicas` + as part of the service `Placement`, allowing to specify maximum replicas per node for the service. +* `POST /containers/create` on Linux now creates a container with `HostConfig.IpcMode=private` + by default, if IpcMode is not explicitly specified. The per-daemon default can be changed + back to `shareable` by using `DefaultIpcMode` daemon configuration parameter. +* `POST /containers/{id}/update` now accepts a `PidsLimit` field to tune a container's + PID limit. Set `0` or `-1` for unlimited. Leave `null` to not change the current value. +* `POST /build` now accepts `outputs` key for configuring build outputs when using BuildKit mode. + +## V1.39 API changes + +* `GET /info` now returns an empty string, instead of `<unknown>` for `KernelVersion` + and `OperatingSystem` if the daemon was unable to obtain this information. +* `GET /info` now returns information about the product license, if a license + has been applied to the daemon. +* `GET /info` now returns a `Warnings` field, containing warnings and informational + messages about missing features, or issues related to the daemon configuration. +* `POST /swarm/init` now accepts a `DefaultAddrPool` property to set global scope default address pool +* `POST /swarm/init` now accepts a `SubnetSize` property to set global scope networks by giving the + length of the subnet masks for every such network +* `POST /session` (added in [V1.31](#v131-api-changes) is no longer experimental. 
+ This endpoint can be used to run interactive long-running protocols between the + client and the daemon. + +## V1.38 API changes + +* `GET /tasks` and `GET /tasks/{id}` now return a `NetworkAttachmentSpec` field, + containing the `ContainerID` for non-service containers connected to "attachable" + swarm-scoped networks. + +## v1.37 API changes + +* `POST /containers/create` and `POST /services/create` now support exposing SCTP ports. +* `POST /configs/create` and `POST /configs/{id}/create` now accept a `Templating` driver. +* `GET /configs` and `GET /configs/{id}` now return the `Templating` driver of the config. +* `POST /secrets/create` and `POST /secrets/{id}/create` now accept a `Templating` driver. +* `GET /secrets` and `GET /secrets/{id}` now return the `Templating` driver of the secret. + +## v1.36 API changes + +* `GET /events` now returns an `exec_die` event when an exec process terminates. + + +## v1.35 API changes + +* `POST /services/create` and `POST /services/(id)/update` now accept an + `Isolation` field on the container spec to set the Isolation technology of the + containers running the service (`default`, `process`, or `hyperv`). This + configuration is only used for Windows containers. +* `GET /containers/(name)/logs` now supports an additional query parameter: `until`, + which returns log lines that occurred before the specified timestamp. +* `POST /containers/{id}/exec` now accepts a `WorkingDir` property to set the + work-dir for the exec process, independent of the container's work-dir. +* `GET /version` now returns a `Platform.Name` field, which can be used by products + using Moby as a foundation to return information about the platform. +* `GET /version` now returns a `Components` field, which can be used to return + information about the components used. Information about the engine itself is + now included as a "Component" version, and contains all information from the + top-level `Version`, `GitCommit`, `APIVersion`, `MinAPIVersion`, `GoVersion`, + `Os`, `Arch`, `BuildTime`, `KernelVersion`, and `Experimental` fields. Going + forward, the information from the `Components` section is preferred over their + top-level counterparts. + + +## v1.34 API changes + +* `POST /containers/(name)/wait?condition=removed` now also returns + in case of container removal failure. A pointer to a structure named + `Error` has been added to the response JSON in order to indicate a failure. + If `Error` is `null`, container removal has succeeded; otherwise, + the text of an error message indicating why container removal has failed + is available from the `Error.Message` field. + +## v1.33 API changes + +* `GET /events` now supports filtering 4 more kinds of events: `config`, `node`, +`secret` and `service`. + +## v1.32 API changes + +* `POST /images/create` now accepts a `platform` parameter in the form of `os[/arch[/variant]]`. +* `POST /containers/create` now accepts additional values for the + `HostConfig.IpcMode` property. New values are `private`, `shareable`, + and `none`. +* `DELETE /networks/{id or name}` fixed an issue where a `name` equal to another + network's name was able to mask that `id`. If both a network with the given + _name_ and a network with the given _id_ exist, the network with the given + _id_ is now deleted. This change is not versioned, and affects all API versions + if the daemon has this patch. + +## v1.31 API changes + +* `DELETE /secrets/(name)` now returns status code 404 instead of 500 when the secret does not exist.
+* `POST /secrets/create` now returns status code 409 instead of 500 when creating an already existing secret. +* `POST /secrets/create` now accepts a `Driver` struct, allowing the + `Name` and driver-specific `Options` to be passed to store a secrets + in an external secrets store. The `Driver` property can be omitted + if the default (internal) secrets store is used. +* `GET /secrets/(id)` and `GET /secrets` now return a `Driver` struct, + containing the `Name` and driver-specific `Options` of the external + secrets store used to store the secret. The `Driver` property is + omitted if no external store is used. +* `POST /secrets/(name)/update` now returns status code 400 instead of 500 when updating a secret's content which is not the labels. +* `POST /nodes/(name)/update` now returns status code 400 instead of 500 when demoting last node fails. +* `GET /networks/(id or name)` now takes an optional query parameter `scope` that will filter the network based on the scope (`local`, `swarm`, or `global`). +* `POST /session` is a new endpoint that can be used for running interactive long-running protocols between client and + the daemon. This endpoint is experimental and only available if the daemon is started with experimental features + enabled. +* `GET /images/(name)/get` now includes an `ImageMetadata` field which contains image metadata that is local to the engine and not part of the image config. +* `POST /services/create` now accepts a `PluginSpec` when `TaskTemplate.Runtime` is set to `plugin` +* `GET /events` now supports config events `create`, `update` and `remove` that are emitted when users create, update or remove a config +* `GET /volumes/` and `GET /volumes/{name}` now return a `CreatedAt` field, + containing the date/time the volume was created. This field is omitted if the + creation date/time for the volume is unknown. For volumes with scope "global", + this field represents the creation date/time of the local _instance_ of the + volume, which may differ from instances of the same volume on different nodes. +* `GET /system/df` now returns a `CreatedAt` field for `Volumes`. Refer to the + `/volumes/` endpoint for a description of this field. + +## v1.30 API changes + +* `GET /info` now returns the list of supported logging drivers, including plugins. +* `GET /info` and `GET /swarm` now returns the cluster-wide swarm CA info if the node is in a swarm: the cluster root CA certificate, and the cluster TLS + leaf certificate issuer's subject and public key. It also displays the desired CA signing certificate, if any was provided as part of the spec. +* `POST /build/` now (when not silent) produces an `Aux` message in the JSON output stream with payload `types.BuildResult` for each image produced. The final such message will reference the image resulting from the build. +* `GET /nodes` and `GET /nodes/{id}` now returns additional information about swarm TLS info if the node is part of a swarm: the trusted root CA, and the + issuer's subject and public key. +* `GET /distribution/(name)/json` is a new endpoint that returns a JSON output stream with payload `types.DistributionInspect` for an image name. It includes a descriptor with the digest, and supported platforms retrieved from directly contacting the registry. 
+* `POST /swarm/update` now accepts 3 additional parameters as part of the swarm spec's CA configuration: the desired CA certificate for the swarm, the desired CA key for the swarm (if not using an external certificate), and an optional parameter to force the swarm to generate and rotate to a new CA certificate/key pair.
+* `POST /services/create` and `POST /services/(id or name)/update` now take the field `Platforms` as part of the service `Placement`, allowing you to specify the platforms supported by the service.
+* `POST /containers/(name)/wait` now accepts a `condition` query parameter to indicate which state change condition to wait for. Also, response headers are now returned immediately to acknowledge that the server has registered a wait callback for the client.
+* `POST /swarm/init` now accepts a `DataPathAddr` property to set the IP address or network interface to use for data traffic.
+* `POST /swarm/join` now accepts a `DataPathAddr` property to set the IP address or network interface to use for data traffic.
+* `GET /events` now supports service, node, and secret events, which are emitted when users create, update, and remove services, nodes, and secrets.
+* `GET /events` now supports a network remove event, which is emitted when users remove a swarm-scoped network.
+* `GET /events` now supports a `scope` filter type, with supported values `swarm` and `local`.
+* `PUT /containers/(name)/archive` now accepts a `copyUIDGID` parameter to allow copying UID/GID maps to the destination file or directory.
+
+## v1.29 API changes
+
+* `DELETE /networks/(name)` now allows you to remove the ingress network, the one used to provide the routing mesh.
+* `POST /networks/create` now supports creating the ingress network by specifying an `Ingress` boolean field. As of now, this is supported only when using the overlay network driver.
+* `GET /networks/(name)` now returns an `Ingress` field showing whether the network is the ingress one.
+* `GET /networks/` now supports a `scope` filter to filter networks based on the network mode (`swarm`, `global`, or `local`).
+* `POST /containers/create`, `POST /services/create`, and `POST /services/(id or name)/update` now take the field `StartPeriod` as a part of the `HealthConfig`, allowing you to specify a period during which the container should not be considered unhealthy even if health checks do not pass.
+* `GET /services/(id)` now accepts an `insertDefaults` query parameter to merge default values into the service inspect output.
+* `POST /containers/prune`, `POST /images/prune`, `POST /volumes/prune`, and `POST /networks/prune` now support a `label` filter to filter containers, images, volumes, or networks based on the label. The label filter can take the form `label=<key>`/`label=<key>=<value>` to remove those with the specified labels, or `label!=<key>`/`label!=<key>=<value>` to remove those without the specified labels.
+* `POST /services/create` now accepts `Privileges` as part of `ContainerSpec`. Privileges currently include `CredentialSpec` and `SELinuxContext`.
+
+## v1.28 API changes
+
+* `POST /containers/create` now includes a `Consistency` field to specify the consistency level for each `Mount`, with possible values `default`, `consistent`, `cached`, or `delegated`.
+* `POST /containers/create` now takes a `DeviceCgroupRules` field in `HostConfig`, allowing you to set custom device cgroup rules for the created container.
+* The optional `verbose` query parameter for `GET /networks/(id or name)` now lists all services with all their tasks, including the non-local tasks on the given network.
+* `GET /containers/(id or name)/attach/ws` now returns WebSocket messages in binary frame format for API version >= v1.28, and in text frame format for API version < v1.28, for backward compatibility.
+* `GET /networks` is optimised to return only the list of all networks and network-specific information. The list of all containers attached to a specific network is removed from this API and is only available using the network-specific `GET /networks/{network-id}`.
+* `GET /containers/json` now supports `publish` and `expose` filters to filter containers that expose or publish certain ports.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `ReadOnly` parameter, which mounts the container's root filesystem as read only.
+* `POST /build` now accepts an `extrahosts` parameter to specify a host-to-IP mapping to use during the build.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept a `rollback` value for `FailureAction`.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept an optional `RollbackConfig` object which specifies rollback options.
+* `GET /services` now supports a `mode` filter to filter services based on the service mode (either `global` or `replicated`).
+* `POST /containers/(name)/update` now supports updating `NanoCpus`, which represents CPU quota in units of 10<sup>-9</sup> CPUs.
+* `POST /plugins/{name}/disable` now accepts a `force` query parameter to disable a plugin even if it is still in use.
+
+## v1.27 API changes
+
+* `GET /containers/(id or name)/stats` now includes an `online_cpus` field in both `precpu_stats` and `cpu_stats`. If this field is `nil`, then for compatibility with older daemons, the length of the corresponding `cpu_usage.percpu_usage` array should be used.
+
+## v1.26 API changes
+
+* `POST /plugins/(plugin name)/upgrade` upgrades a plugin.
+
+## v1.25 API changes
+
+* The API version is now required in all API calls. Instead of just requesting, for example, the URL `/containers/json`, you must now request `/v1.25/containers/json`.
+* `GET /version` now returns `MinAPIVersion`.
+* `POST /build` accepts a `networkmode` parameter to specify the network used during the build.
+* `GET /images/(name)/json` now returns `OsVersion` if populated.
+* `GET /images/(name)/json` no longer contains the `RootFS.BaseLayer` field. This field was used for Windows images that used a base image that was pre-installed on the host (`RootFS.Type` `layers+base`), which is no longer supported, and the `RootFS.BaseLayer` field has been removed.
+* `GET /info` now returns `Isolation`.
+* `POST /containers/create` now takes `AutoRemove` in `HostConfig`, to enable auto-removal of the container on the daemon side when the container's process exits.
+* `GET /containers/json` and `GET /containers/(id or name)/json` now return `"removing"` as a value for the `State.Status` field if the container is being removed. Previously, `"exited"` was returned as the status.
+* `GET /containers/json` now accepts `removing` as a valid value for the `status` filter.
+* `GET /containers/json` now supports filtering containers by `health` status.
+* `DELETE /volumes/(name)` now accepts a `force` query parameter to force removal of volumes that were already removed out of band by the volume driver plugin.
+* `POST /containers/create/` and `POST /containers/(name)/update` now validate restart policies.
+* `POST /containers/create` now validates `IPAMConfig` in `NetworkingConfig`, and returns an error for invalid IPv4 and IPv6 addresses (`--ip` and `--ip6` in `docker create/run`).
+* `POST /containers/create` now takes a `Mounts` field in `HostConfig` which replaces `Binds`, `Volumes`, and `Tmpfs`. *Note*: `Binds`, `Volumes`, and `Tmpfs` are still available and can be combined with `Mounts`.
+* `POST /build` now performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. Note that this change is _unversioned_ and applies to all API versions.
+* `POST /build` accepts a `cachefrom` parameter to specify images used for the build cache.
+* The `GET /networks/` endpoint now correctly returns a list of *all* networks, instead of the default network, when a trailing slash is provided but no `name` or `id`.
+* The `DELETE /containers/(name)` endpoint now returns an error of `removal of container name is already in progress` with a status code of 400 when removal of the container is already in progress.
+* `GET /containers/json` now supports an `is-task` filter to filter containers that are tasks (part of a service in swarm mode).
+* `POST /containers/create` now takes a `StopTimeout` field.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept `Monitor` and `MaxFailureRatio` parameters, which control the response to failures during service updates.
+* `POST /services/(id or name)/update` now accepts a `ForceUpdate` parameter inside the `TaskTemplate`, which causes the service to be updated even if there are no changes which would ordinarily trigger an update.
+* `POST /services/create` and `POST /services/(id or name)/update` now return a `Warnings` array.
+* `GET /networks/(name)` now returns a `Created` field in the response, showing the time the network was created.
+* `POST /containers/(id or name)/exec` now accepts an `Env` field, which holds a list of environment variables to be set in the context of the command execution.
+* `GET /volumes`, `GET /volumes/(name)`, and `POST /volumes/create` now return the `Options` field, which holds the driver-specific options to use when creating the volume.
+* `GET /exec/(id)/json` now returns `Pid`, which is the system pid for the exec'd process.
+* `POST /containers/prune` prunes stopped containers.
+* `POST /images/prune` prunes unused images.
+* `POST /volumes/prune` prunes unused volumes.
+* `POST /networks/prune` prunes unused networks.
+* Every API response now includes a `Docker-Experimental` header specifying if experimental features are enabled (value can be `true` or `false`).
+* Every API response now includes an `API-Version` header specifying the default API version of the server.
+* The `hostConfig` option now accepts the fields `CpuRealtimePeriod` and `CpuRtRuntime` to allocate CPU runtime to real-time tasks when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel.
+* The `SecurityOptions` field within the `GET /info` response now includes `userns` if user namespaces are enabled in the daemon.
+* `GET /nodes` and `GET /node/(id or name)` now return `Addr` as part of a node's `Status`, which is the address from which that node connects to the manager.
+* The `HostConfig` field now includes `NanoCpus`, which represents CPU quota in units of 10<sup>-9</sup> CPUs.
+* `GET /info` now returns more structured information about security options.
+* The `HostConfig` field now includes `CpuCount`, which represents the number of CPUs available for execution by the container. Windows daemon only.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `TTY` parameter, which allocates a pseudo-TTY in the container.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `DNSConfig` parameter, which specifies DNS-related configuration in the resolver configuration file (`resolv.conf`) through `Nameservers`, `Search`, and `Options`.
+* `POST /services/create` and `POST /services/(id or name)/update` now support `node.platform.arch` and `node.platform.os` constraints in the service's `TaskSpec.Placement.Constraints` field.
+* `GET /networks/(id or name)` now includes the IP and name of all peer nodes for swarm-mode overlay networks.
+* `GET /plugins` lists plugins.
+* `POST /plugins/pull?name=<plugin name>` pulls a plugin.
+* `GET /plugins/(plugin name)` inspects a plugin.
+* `POST /plugins/(plugin name)/set` configures a plugin.
+* `POST /plugins/(plugin name)/enable` enables a plugin.
+* `POST /plugins/(plugin name)/disable` disables a plugin.
+* `POST /plugins/(plugin name)/push` pushes a plugin.
+* `POST /plugins/create?name=(plugin name)` creates a plugin.
+* `DELETE /plugins/(plugin name)` deletes a plugin.
+* `POST /node/(id or name)/update` now accepts either `id` or `name` to identify the node to update.
+* `GET /images/json` now supports a `reference` filter.
+* `GET /secrets` returns information on the secrets.
+* `POST /secrets/create` creates a secret.
+* `DELETE /secrets/{id}` removes the secret `id`.
+* `GET /secrets/{id}` returns information on the secret `id`.
+* `POST /secrets/{id}/update` updates the secret `id`.
+* `POST /services/(id or name)/update` now accepts a service name or a prefix of the service ID as a parameter.
+* `POST /containers/create` added two built-in log-opts that work on all logging drivers: `mode` (`blocking`|`non-blocking`) and `max-buffer-size` (e.g. `2m`), which enable a non-blocking log buffer.
+* `POST /containers/create` now takes a `HostConfig.Init` field to run an init inside the container that forwards signals and reaps processes.
+
+## v1.24 API changes
+
+* `POST /containers/create` now takes a `StorageOpt` field.
+* `GET /info` now returns a `SecurityOptions` field, showing if `apparmor`, `seccomp`, or `selinux` is supported.
+* `GET /info` no longer returns the `ExecutionDriver` property. This property was no longer used after integration with containerd in Docker 1.11.
+* `GET /networks` now supports filtering by `label` and `driver`.
+* `GET /containers/json` now supports filtering containers by `network` name or id.
+* `POST /containers/create` now takes `IOMaximumBandwidth` and `IOMaximumIOps` fields. Windows daemon only.
+* `POST /containers/create` now returns an HTTP 400 "bad parameter" message if no command is specified (instead of an HTTP 500 "server error").
+* `GET /images/search` now takes a `filters` query parameter.
+* `GET /events` now supports a `reload` event that is emitted when the daemon configuration is reloaded.
+* `GET /events` now supports filtering by daemon name or ID.
+* `GET /events` now supports a `detach` event that is emitted on detaching from a container process.
+* `GET /events` now supports an `exec_detach` event that is emitted on detaching from an exec process.
+* `GET /images/json` now supports filters `since` and `before`.
+* `POST /containers/(id or name)/start` no longer accepts a `HostConfig`.
+* `POST /images/(name)/tag` no longer has a `force` query parameter.
+* `GET /images/search` now supports a `limit` parameter for the maximum number of returned search results.
+* `POST /containers/{name:.*}/copy` is now removed and errors out starting from this API version.
+* API errors are now returned as JSON instead of plain text.
+* `POST /containers/create` and `POST /containers/(id)/start` allow you to configure kernel parameters (sysctls) for use in the container.
+* `POST /containers/<container ID>/exec` and `POST /exec/<exec ID>/start` no longer expect a "Container" field to be present. This property was not used and is no longer sent by the Docker client.
+* `POST /containers/create/` now validates the hostname (it should be a valid RFC 1123 hostname).
+* The `POST /containers/create/` `HostConfig.PidMode` field now accepts `container:<name|id>`, to have the container join the PID namespace of an existing container.
+
+## v1.23 API changes
+
+* `GET /containers/json` returns the state of the container, one of `created`, `restarting`, `running`, `paused`, `exited`, or `dead`.
+* `GET /containers/json` returns the mount points for the container.
+* `GET /networks/(name)` now returns an `Internal` field showing whether the network is internal or not.
+* `GET /networks/(name)` now returns an `EnableIPv6` field showing whether the network has IPv6 enabled or not.
+* `POST /containers/(name)/update` now supports updating the container's restart policy.
+* `POST /networks/create` now supports enabling IPv6 on the network by setting the `EnableIPv6` field (doing this with a label will no longer work).
+* `GET /info` now returns a `CgroupDriver` field showing which cgroup driver the daemon is using: `cgroupfs` or `systemd`.
+* `GET /info` now returns a `KernelMemory` field, showing if "kernel memory limit" is supported.
+* `POST /containers/create` now takes a `PidsLimit` field, if the kernel is >= 4.3 and the pids cgroup is supported.
+* `GET /containers/(id or name)/stats` now returns `pids_stats`, if the kernel is >= 4.3 and the pids cgroup is supported.
+* `POST /containers/create` now allows you to override user namespace remapping and use privileged options for the container.
+* `POST /containers/create` now allows specifying `nocopy` for named volumes, which disables automatic copying from the container path to the volume.
+* `POST /auth` now returns an `IdentityToken` when supported by a registry.
+* `POST /containers/create` with both `Hostname` and `Domainname` fields specified will result in the container's hostname being set to `Hostname`, rather than `Hostname.Domainname`.
+* `GET /volumes` now supports more filters; newly added filters are `name` and `driver`.
+* `GET /containers/(id or name)/logs` now accepts a `details` query parameter to stream the extra attributes that were provided to the container's `LogOpts`, such as environment variables and labels, with the logs.
+* `POST /images/load` now returns progress information as a JSON stream, and has a `quiet` query parameter to suppress progress details.
+
+## v1.22 API changes
+
+* The `HostConfig.LxcConf` field has been removed, and is no longer available on `POST /containers/create` and `GET /containers/(id)/json`.
+* `POST /containers/(name)/update` updates the resources of a container.
+* `GET /containers/json` supports filter `isolation` on Windows.
+* `GET /containers/json` now returns the list of networks of containers.
+* `GET /info` now returns `Architecture` and `OSType` fields, providing information about the host architecture and operating system type that the daemon runs on.
+* `GET /networks/(name)` now returns a `Name` field for each container attached to the network.
+* `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it consistent with other date/time values returned by the API.
+* `AuthConfig` now supports a `registrytoken` for token-based authentication.
+* `POST /containers/create` now has a 4M minimum value limit for `HostConfig.KernelMemory`.
+* Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create` will be cancelled if the HTTP connection making the API request is closed before the push or pull completes.
+* `POST /containers/create` now allows you to set a read/write rate limit for a device (in bytes per second or IO per second).
+* `GET /networks` now supports filtering by `name`, `id`, and `type`.
+* `POST /containers/create` now allows you to set the static IPv4 and/or IPv6 address for the container.
+* `POST /networks/(id)/connect` now allows you to set the static IPv4 and/or IPv6 address for the container.
+* `GET /info` now includes the number of containers running, stopped, and paused.
+* `POST /networks/create` now supports restricting external access to the network by setting the `Internal` field.
+* `POST /networks/(id)/disconnect` now includes a `Force` option to forcefully disconnect a container from a network.
+* `GET /containers/(id)/json` now returns the `NetworkID` of containers.
+* `POST /networks/create` now supports an options field in the IPAM config that provides options for custom IPAM plugins.
+* `GET /networks/{network-id}` now returns IPAM config options for custom IPAM plugins if any are available.
+* `GET /networks/<network-id>` now returns subnet info for user-defined networks.
+* `GET /info` can now return a `SystemStatus` field useful for returning additional information about applications that are built on top of the engine.
+
+## v1.21 API changes
+
+* `GET /volumes` lists volumes from all volume drivers.
+* `POST /volumes/create` creates a volume.
+* `GET /volumes/(name)` gets low-level information about a volume.
+* `DELETE /volumes/(name)` removes a volume with the specified name.
+* `VolumeDriver` was moved from `config` to `HostConfig` to make the configuration portable.
+* `GET /images/(name)/json` now returns information about an image's `RepoTags` and `RepoDigests`.
+* The `config` option now accepts the field `StopSignal`, which specifies the signal to use to kill a container.
+* `GET /containers/(id)/stats` will return networking information for each network interface.
+* The `HostConfig` option now includes the `DnsOptions` field to configure the container's DNS options.
+* `POST /build` now optionally takes a serialized map of build-time variables.
+* `GET /events` now includes a `timenano` field, in addition to the existing `time` field.
+* `GET /events` now supports filtering by image and container labels.
+* `GET /info` now lists engine version information and returns information about `CPUShares` and `Cpuset`.
+* `GET /containers/json` will return the `ImageID` of the image used by the container.
+* `POST /exec/(name)/start` will now return an HTTP 409 when the container is either stopped or paused.
+* `POST /containers/create` now takes `KernelMemory` in `HostConfig` to specify the kernel memory limit.
+* `GET /containers/(name)/json` now accepts a `size` parameter.
+  Setting this parameter to '1' returns container size information in the `SizeRw` and `SizeRootFs` fields.
+* `GET /containers/(name)/json` now returns a `NetworkSettings.Networks` field, detailing network settings per network. This field deprecates the `NetworkSettings.EndpointID`, `NetworkSettings.Gateway`, `NetworkSettings.GlobalIPv6Address`, `NetworkSettings.GlobalIPv6PrefixLen`, `NetworkSettings.IPAddress`, `NetworkSettings.IPPrefixLen`, `NetworkSettings.IPv6Gateway`, and `NetworkSettings.MacAddress` fields, which are still returned for backward-compatibility, but will be removed in a future version.
+* `GET /exec/(id)/json` now returns a `NetworkSettings.Networks` field, detailing network settings per network. This field deprecates the `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`, `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which are still returned for backward-compatibility, but will be removed in a future version.
+* The `HostConfig` option now includes the `OomScoreAdj` field for adjusting the badness heuristic. This heuristic selects which processes the OOM killer kills under out-of-memory conditions.
+
+## v1.20 API changes
+
+* `GET /containers/(id)/archive` gets an archive of filesystem content from a container.
+* `PUT /containers/(id)/archive` uploads an archive of content to be extracted to an existing directory inside a container's filesystem.
+* `POST /containers/(id)/copy` is deprecated in favor of the above `archive` endpoint, which can be used to download files and directories from a container.
+* The `hostConfig` option now accepts the field `GroupAdd`, which specifies a list of additional groups that the container process will run as.
+
+## v1.19 API changes
+
+* When the daemon detects a version mismatch with the client, usually when the client is newer than the daemon, an HTTP 400 is now returned instead of a 404.
+* `GET /containers/(id)/stats` now accepts a `stream` boolean parameter to get only one set of stats and then disconnect.
+* `GET /containers/(id)/logs` now accepts a `since` timestamp parameter.
+* `GET /info` now returns the fields `Debug`, `IPv4Forwarding`, `MemoryLimit`, and `SwapLimit` as booleans instead of as ints. In addition, the endpoint now returns the new boolean fields `CpuCfsPeriod`, `CpuCfsQuota`, and `OomKillDisable`.
+* The `hostConfig` option now accepts the fields `CpuPeriod` and `CpuQuota`.
+* `POST /build` accepts `cpuperiod` and `cpuquota` options.
+
+## v1.18 API changes
+
+* `GET /version` now returns `Os`, `Arch`, and `KernelVersion`.
+* `POST /containers/create` and `POST /containers/(id)/start` allow you to set ulimit settings for use in the container.
+* `GET /info` now returns `SystemTime`, `HttpProxy`, `HttpsProxy`, and `NoProxy`.
+* `GET /images/json` added a `RepoDigests` field to include image digest information.
+* `POST /build` can now set resource constraints for all containers created for the build.
+* `CgroupParent` can be passed in the host config to set up container cgroups under a specific cgroup.
+* `POST /build`: closing the HTTP request cancels the build.
+* `POST /containers/(id)/exec` now includes a `Warnings` field in the response.
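+
+As a quick illustration of how a client addresses these versioned endpoints, here is a minimal sketch that assumes the default local daemon socket at `/var/run/docker.sock`; the version prefix and the `removing` status filter come from the v1.25 notes above, so adjust both for your daemon:
+
+```bash
+# Ask the daemon for its default and minimum supported API versions.
+curl --unix-socket /var/run/docker.sock http://localhost/version
+
+# Since API v1.25 the version prefix is required on every call.
+# List containers currently in the "removing" state; the filters value
+# is URL-encoded JSON: {"status":["removing"]}
+curl --unix-socket /var/run/docker.sock \
+  "http://localhost/v1.25/containers/json?all=1&filters=%7B%22status%22%3A%5B%22removing%22%5D%7D"
+```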
diff --git a/_vendor/github.com/moby/moby/api/docs/README.md b/_vendor/github.com/moby/moby/api/docs/README.md new file mode 100644 index 00000000000..46bf79bdbef --- /dev/null +++ b/_vendor/github.com/moby/moby/api/docs/README.md @@ -0,0 +1,26 @@ +# API Documentation + +This directory contains versioned documents for each version of the API +specification supported by this module. While this module provides support +for older API versions, support should be considered "best-effort", especially +for very old versions. Users are recommended to use the latest API versions, +and only rely on older API versions for compatibility with older clients. + +Newer API versions tend to be backward-compatible with older versions, +with some exceptions where features were deprecated. For an overview +of changes for each version, refer to [CHANGELOG.md](CHANGELOG.md). + +The latest version of the API specification can be found [at the root directory +of this module](../swagger.yaml) which may contain unreleased changes. + +For API version v1.24, documentation is only available in markdown +format, for later versions [Swagger (OpenAPI) v2.0](https://swagger.io/specification/v2/) +specifications can be found in this directory. The Moby project itself +primarily uses these swagger files to produce the API documentation; +while we attempt to make these files match the actual implementation, +the OpenAPI 2.0 specification has limitations that prevent us from +expressing all options provided. There may be discrepancies (for which +we welcome contributions). If you find bugs, or discrepancies, please +open a ticket (or pull request). + + diff --git a/_vendor/github.com/moby/moby/docs/api/v1.24.md b/_vendor/github.com/moby/moby/api/docs/v1.24.md similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.24.md rename to _vendor/github.com/moby/moby/api/docs/v1.24.md index 45b4b3fdbb9..2d374a4a848 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.24.md +++ b/_vendor/github.com/moby/moby/api/docs/v1.24.md @@ -310,7 +310,6 @@ Create a container "Memory": 0, "MemorySwap": 0, "MemoryReservation": 0, - "KernelMemory": 0, "CpuPercent": 80, "CpuShares": 512, "CpuPeriod": 100000, @@ -438,7 +437,6 @@ Create a container - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. You must use this with `memory` and make the swap value larger than `memory`. - **MemoryReservation** - Memory soft limit in bytes. - - **KernelMemory** - Kernel memory limit in bytes. - **CpuPercent** - An integer value containing the usable percentage of the available CPUs. (Windows daemon only) - **CpuShares** - An integer value containing the container's CPU Shares (ie. the relative weight vs other containers). @@ -627,7 +625,6 @@ Return low-level information on the container `id` "Memory": 0, "MemorySwap": 0, "MemoryReservation": 0, - "KernelMemory": 0, "OomKillDisable": false, "OomScoreAdj": 500, "NetworkMode": "bridge", @@ -1197,7 +1194,6 @@ Update configuration of one or more containers. "Memory": 314572800, "MemorySwap": 514288000, "MemoryReservation": 209715200, - "KernelMemory": 52428800, "RestartPolicy": { "MaximumRetryCount": 4, "Name": "on-failure" @@ -1830,8 +1826,7 @@ a base64-encoded AuthConfig object. ``` { "username": "jdoe", - "password": "secret", - "email": "jdoe@acme.com" + "password": "secret" } ``` @@ -2066,8 +2061,7 @@ The push is cancelled if the HTTP connection is closed. 
``` { "username": "jdoe", - "password": "secret", - "email": "jdoe@acme.com", + "password": "secret" } ``` @@ -2498,8 +2492,6 @@ Docker daemon report the following event: Transfer-Encoding: chunked { - "status": "pull", - "id": "alpine:latest", "Type": "image", "Action": "pull", "Actor": { @@ -2512,9 +2504,6 @@ Docker daemon report the following event: "timeNano": 1461943101301854122 } { - "status": "create", - "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", - "from": "alpine", "Type": "container", "Action": "create", "Actor": { @@ -2529,9 +2518,6 @@ Docker daemon report the following event: "timeNano": 1461943101381709551 } { - "status": "attach", - "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", - "from": "alpine", "Type": "container", "Action": "attach", "Actor": { @@ -2560,9 +2546,6 @@ Docker daemon report the following event: "timeNano": 1461943101394865557 } { - "status": "start", - "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", - "from": "alpine", "Type": "container", "Action": "start", "Actor": { @@ -2577,9 +2560,6 @@ Docker daemon report the following event: "timeNano": 1461943101607533796 } { - "status": "resize", - "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", - "from": "alpine", "Type": "container", "Action": "resize", "Actor": { @@ -2596,9 +2576,6 @@ Docker daemon report the following event: "timeNano": 1461943101610269268 } { - "status": "die", - "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", - "from": "alpine", "Type": "container", "Action": "die", "Actor": { @@ -2628,9 +2605,6 @@ Docker daemon report the following event: "timeNano": 1461943105230860245 } { - "status": "destroy", - "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", - "from": "alpine", "Type": "container", "Action": "destroy", "Actor": { diff --git a/_vendor/github.com/moby/moby/docs/api/v1.25.yaml b/_vendor/github.com/moby/moby/api/docs/v1.25.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.25.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.25.yaml index 8a57a98d872..ea8d12a0e18 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.25.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.25.yaml @@ -60,7 +60,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -130,6 +129,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -156,6 +183,20 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. 
+ type: "string" + enum: + - "bind" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -167,13 +208,10 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - `tmpfs` a `tmpfs`. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -257,19 +295,20 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -320,7 +359,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: description: | @@ -432,10 +474,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -984,6 +1022,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -1975,6 +2017,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." 
type: "integer" + format: "uint64" Networks: type: "array" items: @@ -2712,7 +2755,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -3012,7 +3054,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -3693,7 +3734,6 @@ paths: Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 - KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" @@ -4602,24 +4642,7 @@ paths: schema: type: "array" items: - type: "object" - properties: - Id: - type: "string" - Created: - type: "integer" - format: "int64" - CreatedBy: - type: "string" - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - Comment: - type: "string" + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -4712,7 +4735,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: @@ -5100,7 +5128,6 @@ paths: IndexServerAddress: "https://index.docker.io/v1/" InitPath: "/usr/bin/docker" InitSha1: "" - KernelMemory: true KernelVersion: "3.12.0-1-amd64" Labels: - "storage=ssd" diff --git a/_vendor/github.com/moby/moby/docs/api/v1.26.yaml b/_vendor/github.com/moby/moby/api/docs/v1.26.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.26.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.26.yaml index 8de146c0733..46af01c0033 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.26.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.26.yaml @@ -60,7 +60,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -130,6 +129,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -156,6 +183,20 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -167,13 +208,10 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - `tmpfs` a `tmpfs`. 
- type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -257,19 +295,20 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -320,7 +359,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: description: | @@ -432,10 +474,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -984,6 +1022,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -1979,6 +2021,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." type: "integer" + format: "uint64" Networks: type: "array" items: @@ -2716,7 +2759,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -4610,24 +4652,7 @@ paths: schema: type: "array" items: - type: "object" - properties: - Id: - type: "string" - Created: - type: "integer" - format: "int64" - CreatedBy: - type: "string" - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - Comment: - type: "string" + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -4720,7 +4745,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. 
The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.27.yaml b/_vendor/github.com/moby/moby/api/docs/v1.27.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.27.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.27.yaml index 3e4c1167cf5..ef80c44db71 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.27.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.27.yaml @@ -60,7 +60,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -132,6 +131,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -158,6 +185,20 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -169,13 +210,10 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - `tmpfs` a `tmpfs`. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -259,19 +297,20 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." 
type: "boolean" @@ -322,7 +361,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: description: | @@ -434,10 +476,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -504,7 +542,9 @@ definitions: format: "int64" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -515,6 +555,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -989,6 +1035,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2051,6 +2101,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." type: "integer" + format: "uint64" Networks: type: "array" items: @@ -2775,7 +2826,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -3075,7 +3125,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -4680,24 +4729,7 @@ paths: schema: type: "array" items: - type: "object" - properties: - Id: - type: "string" - Created: - type: "integer" - format: "int64" - CreatedBy: - type: "string" - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - Comment: - type: "string" + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -4790,7 +4822,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. 
operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.28.yaml b/_vendor/github.com/moby/moby/api/docs/v1.28.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.28.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.28.yaml index 34ad0b83911..57fa2ddeacf 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.28.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.28.yaml @@ -60,7 +60,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -132,6 +131,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -158,6 +185,20 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -169,13 +210,10 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - `tmpfs` a `tmpfs`. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -259,19 +297,20 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -322,7 +361,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. 
+ The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: description: | @@ -440,10 +482,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -510,7 +548,9 @@ definitions: format: "int64" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -521,6 +561,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -1027,6 +1073,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2104,6 +2154,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." type: "integer" + format: "uint64" Networks: type: "array" items: @@ -2864,7 +2915,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -3164,7 +3214,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -3855,7 +3904,6 @@ paths: Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 - KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" @@ -4781,31 +4829,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -4898,7 +4922,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. 
operationId: "ImageTag" responses: 201: @@ -5287,7 +5316,6 @@ paths: IndexServerAddress: "https://index.docker.io/v1/" InitPath: "/usr/bin/docker" InitSha1: "" - KernelMemory: true KernelVersion: "3.12.0-1-amd64" Labels: - "storage=ssd" diff --git a/_vendor/github.com/moby/moby/docs/api/v1.29.yaml b/_vendor/github.com/moby/moby/api/docs/v1.29.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.29.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.29.yaml index 0895a92ed22..ebec0c4c606 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.29.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.29.yaml @@ -60,7 +60,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -132,6 +131,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -158,6 +185,20 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -169,13 +210,10 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - `tmpfs` a `tmpfs`. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -259,19 +297,20 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." 
type: "boolean" @@ -325,7 +364,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: description: | @@ -443,10 +485,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -513,7 +551,9 @@ definitions: format: "int64" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -524,6 +564,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -1033,6 +1079,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2125,6 +2175,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." type: "integer" + format: "uint64" Networks: type: "array" items: @@ -2897,7 +2948,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -3197,7 +3247,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -3888,7 +3937,6 @@ paths: Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 - KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" @@ -4814,31 +4862,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -4931,7 +4955,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. 
operationId: "ImageTag" responses: 201: @@ -5320,7 +5349,6 @@ paths: IndexServerAddress: "https://index.docker.io/v1/" InitPath: "/usr/bin/docker" InitSha1: "" - KernelMemory: true KernelVersion: "3.12.0-1-amd64" Labels: - "storage=ssd" diff --git a/_vendor/github.com/moby/moby/docs/api/v1.30.yaml b/_vendor/github.com/moby/moby/api/docs/v1.30.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.30.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.30.yaml index 8ab1764f047..099f08b2d2d 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.30.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.30.yaml @@ -60,7 +60,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -132,6 +131,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -158,6 +185,20 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -169,13 +210,10 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - `tmpfs` a `tmpfs`. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -259,20 +297,20 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." 
type: "boolean" @@ -326,7 +364,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: description: | @@ -445,10 +486,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -515,7 +552,9 @@ definitions: format: "int64" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -526,6 +565,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -1043,6 +1088,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2303,6 +2352,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." type: "integer" + format: "uint64" Runtime: description: "Runtime is the type of runtime specified for the task executor." type: "string" @@ -3110,7 +3160,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -5056,31 +5105,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -5173,7 +5198,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. 
operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.31.yaml b/_vendor/github.com/moby/moby/api/docs/v1.31.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.31.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.31.yaml index 9a5efd28de1..164b454e5c6 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.31.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.31.yaml @@ -60,7 +60,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -132,6 +131,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -158,6 +185,20 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -169,13 +210,10 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - `tmpfs` a `tmpfs`. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -259,20 +297,20 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -326,7 +364,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. 
+ The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: description: | @@ -445,10 +486,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -515,7 +552,9 @@ definitions: format: "int64" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -526,6 +565,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -1049,6 +1094,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2332,6 +2381,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." type: "integer" + format: "uint64" Runtime: description: "Runtime is the type of runtime specified for the task executor." type: "string" @@ -2857,11 +2907,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -2902,8 +2952,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). 
type: "string" Config: @@ -3183,7 +3233,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -5150,31 +5199,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -5267,7 +5292,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.32.yaml b/_vendor/github.com/moby/moby/api/docs/v1.32.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.32.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.32.yaml index 99823282e56..6e05d83f7bb 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.32.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.32.yaml @@ -60,7 +60,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -132,6 +131,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -158,6 +185,22 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -169,15 +212,11 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. - `npipe` a named pipe from the host into the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -261,20 +300,23 @@ definitions: description: "Container path." 
type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -328,7 +370,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -449,10 +494,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -566,7 +607,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -577,6 +620,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -1308,6 +1357,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2783,6 +2836,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." type: "integer" + format: "uint64" Runtime: description: "Runtime is the type of runtime specified for the task executor." type: "string" @@ -3329,11 +3383,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. 
+ Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3375,8 +3429,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" @@ -3518,10 +3572,6 @@ definitions: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true - KernelMemory: - description: "Indicates if the host has kernel memory limit support enabled." - type: "boolean" - example: true CpuCfsPeriod: description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host." type: "boolean" @@ -3621,10 +3671,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). 
type: "string" example: "x86_64" NCPU: @@ -4395,7 +4448,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -4703,7 +4755,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -5392,7 +5443,6 @@ paths: Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 - KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" @@ -6361,31 +6411,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -6478,7 +6504,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.33.yaml b/_vendor/github.com/moby/moby/api/docs/v1.33.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.33.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.33.yaml index 4d49c4156bd..9ba99a65161 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.33.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.33.yaml @@ -60,7 +60,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -136,6 +135,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -162,6 +189,22 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -173,15 +216,11 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. 
- `npipe` a named pipe from the host into the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -265,20 +304,23 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -332,7 +374,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -453,10 +498,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -570,7 +611,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -581,6 +624,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -1312,6 +1361,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2787,6 +2840,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." 
type: "integer" + format: "uint64" Runtime: description: "Runtime is the type of runtime specified for the task executor." type: "string" @@ -3333,11 +3387,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3379,8 +3433,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" @@ -3522,10 +3576,6 @@ definitions: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true - KernelMemory: - description: "Indicates if the host has kernel memory limit support enabled." - type: "boolean" - example: true CpuCfsPeriod: description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host." type: "boolean" @@ -3625,10 +3675,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). 
type: "string" example: "x86_64" NCPU: @@ -4399,7 +4452,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -4707,7 +4759,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -5396,7 +5447,6 @@ paths: Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 - KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" @@ -6365,31 +6415,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -6482,7 +6508,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.34.yaml b/_vendor/github.com/moby/moby/api/docs/v1.34.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.34.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.34.yaml index 2d0d987fb2b..9791708705c 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.34.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.34.yaml @@ -62,7 +62,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -138,6 +137,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -164,6 +191,22 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -175,15 +218,11 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. 
- `npipe` a named pipe from the host into the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -267,20 +306,23 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -334,7 +376,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -455,10 +500,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -572,7 +613,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -583,6 +626,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -1322,6 +1371,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2797,6 +2850,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." 
type: "integer" + format: "uint64" Runtime: description: "Runtime is the type of runtime specified for the task executor." type: "string" @@ -3361,11 +3415,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3407,8 +3461,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" @@ -3550,10 +3604,6 @@ definitions: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true - KernelMemory: - description: "Indicates if the host has kernel memory limit support enabled." - type: "boolean" - example: true CpuCfsPeriod: description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host." type: "boolean" @@ -3653,10 +3703,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). 
type: "string" example: "x86_64" NCPU: @@ -4427,7 +4480,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -4735,7 +4787,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -5424,7 +5475,6 @@ paths: Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 - KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" @@ -6405,31 +6455,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -6522,7 +6548,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.35.yaml b/_vendor/github.com/moby/moby/api/docs/v1.35.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.35.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.35.yaml index e7de26d46d7..03bb17201a9 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.35.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.35.yaml @@ -71,7 +71,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -147,6 +146,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -173,6 +200,22 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -184,15 +227,11 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. 
- `npipe` a named pipe from the host into the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -276,20 +315,23 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -344,7 +386,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -466,10 +511,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -583,7 +624,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -594,6 +637,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -1319,6 +1368,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2801,6 +2854,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." 
type: "integer" + format: "uint64" Runtime: description: "Runtime is the type of runtime specified for the task executor." type: "string" @@ -3365,11 +3419,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3411,8 +3465,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" @@ -3554,10 +3608,6 @@ definitions: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true - KernelMemory: - description: "Indicates if the host has kernel memory limit support enabled." - type: "boolean" - example: true CpuCfsPeriod: description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host." type: "boolean" @@ -3657,10 +3707,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). 
type: "string" example: "x86_64" NCPU: @@ -4431,7 +4484,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -4739,7 +4791,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -5433,7 +5484,6 @@ paths: Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 - KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" @@ -6414,31 +6464,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -6531,7 +6557,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.36.yaml b/_vendor/github.com/moby/moby/api/docs/v1.36.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.36.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.36.yaml index d39839373f8..ab036218c8e 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.36.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.36.yaml @@ -71,7 +71,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -147,6 +146,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -173,6 +200,22 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -184,15 +227,11 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. 
- `npipe` a named pipe from the host into the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -276,20 +315,23 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -344,7 +386,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -583,7 +628,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -594,6 +641,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -1319,6 +1372,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2814,6 +2871,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." type: "integer" + format: "uint64" Runtime: description: "Runtime is the type of runtime specified for the task executor." 
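The clarified tmpfs `Mode` description in this hunk requires the permission mode as the decimal representation of the octal value (755 becomes 493). A minimal sketch of that conversion using plain shell arithmetic:

```bash
#!/bin/bash
# Convert a familiar octal permission mode (e.g. 755) into the decimal
# integer the TmpfsOptions.Mode field expects (e.g. 493).
octal_mode=755
decimal_mode=$(( 8#$octal_mode ))   # interpret as base-8, print as decimal
echo "$octal_mode (octal) == $decimal_mode (decimal)"

# Sanity check in the other direction:
printf 'decimal %d back to octal: %o\n' "$decimal_mode" "$decimal_mode"
```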
type: "string" @@ -3378,11 +3436,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3424,8 +3482,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" @@ -3670,10 +3728,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). type: "string" example: "x86_64" NCPU: @@ -6440,33 +6501,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -6559,7 +6594,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. 
operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.37.yaml b/_vendor/github.com/moby/moby/api/docs/v1.37.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.37.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.37.yaml index 014086b9d55..71c292576d6 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.37.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.37.yaml @@ -71,7 +71,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -147,6 +146,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -173,6 +200,22 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -184,15 +227,11 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. - `npipe` a named pipe from the host into the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -276,20 +315,23 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. 
- type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -344,7 +386,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -587,7 +632,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -598,6 +645,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -1322,6 +1375,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2817,6 +2874,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." type: "integer" + format: "uint64" Runtime: description: "Runtime is the type of runtime specified for the task executor." type: "string" @@ -3384,11 +3442,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3437,8 +3495,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: @@ -3690,10 +3748,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. 
+ This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). type: "string" example: "x86_64" NCPU: @@ -6483,33 +6544,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -6602,7 +6637,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.38.yaml b/_vendor/github.com/moby/moby/api/docs/v1.38.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.38.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.38.yaml index 23555a87ac4..5ebc08b5b3e 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.38.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.38.yaml @@ -71,7 +71,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -147,6 +146,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -174,6 +201,22 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -185,15 +228,11 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. 
- - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. - `npipe` a named pipe from the host into the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -277,20 +316,23 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -345,7 +387,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -467,10 +512,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -588,7 +629,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -599,6 +642,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -1333,6 +1382,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. 
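The hunk above marks the auth configuration's `email` field as deprecated. A minimal sketch of building registry credentials without it, assuming the common pattern of passing the JSON base64url-encoded in the `X-Registry-Auth` header; the registry address and credentials are placeholders:

```bash
#!/bin/bash
# Build an auth config without the deprecated "email" field and encode it
# (URL-safe base64) for the X-Registry-Auth header on a pull request.
set -euo pipefail

auth_json='{"username":"someuser","password":"somepassword","serveraddress":"registry.example.com"}'
x_registry_auth=$(printf '%s' "$auth_json" | base64 | tr -d '\n' | tr '+/' '-_')

curl -s --unix-socket /var/run/docker.sock \
  -X POST \
  -H "X-Registry-Auth: $x_registry_auth" \
  "http://localhost/images/create?fromImage=registry.example.com/private/app&tag=latest"
```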
type: "string" serveraddress: type: "string" @@ -2871,6 +2924,7 @@ definitions: ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." type: "integer" + format: "uint64" Runtime: description: "Runtime is the type of runtime specified for the task executor." type: "string" @@ -3438,11 +3492,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3491,8 +3545,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: @@ -3641,10 +3695,6 @@ definitions: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true - KernelMemory: - description: "Indicates if the host has kernel memory limit support enabled." - type: "boolean" - example: true CpuCfsPeriod: description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host." type: "boolean" @@ -3744,10 +3794,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). 
type: "string" example: "x86_64" NCPU: @@ -4518,7 +4571,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -4836,7 +4888,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -5543,7 +5594,6 @@ paths: Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 - KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" @@ -6554,33 +6604,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -6673,7 +6697,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.39.yaml b/_vendor/github.com/moby/moby/api/docs/v1.39.yaml similarity index 99% rename from _vendor/github.com/moby/moby/docs/api/v1.39.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.39.yaml index 2c16eca8860..5ab447c339a 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.39.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.39.yaml @@ -81,7 +81,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -173,6 +172,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -200,6 +227,22 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. 
+ type: "string" + enum: + - "bind" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -211,15 +254,11 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. - `npipe` a named pipe from the host into the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -303,20 +342,23 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -371,7 +413,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -530,11 +575,6 @@ definitions: description: "Disk limit (in bytes)." type: "integer" format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" - example: 209715200 MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -676,7 +716,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -687,6 +729,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -700,6 +748,10 @@ definitions: description: | The time to wait before considering the check to have hung. 
It should be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. type: "integer" format: "int64" Retries: @@ -2109,6 +2161,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -3913,6 +3969,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. @@ -4499,11 +4556,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -4554,8 +4611,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: @@ -4744,7 +4801,9 @@ definitions: example: "linux" Arch: description: | - The architecture that the daemon is running on + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "amd64" KernelVersion: @@ -4889,10 +4948,6 @@ definitions: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true - KernelMemory: - description: "Indicates if the host has kernel memory limit support enabled." - type: "boolean" - example: true CpuCfsPeriod: description: | Indicates if CPU CFS(Completely Fair Scheduler) period is supported by @@ -4999,10 +5054,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). 
+ Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). type: "string" example: "x86_64" NCPU: @@ -5823,7 +5881,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -6097,7 +6154,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -6469,7 +6525,8 @@ paths: To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` @@ -6835,7 +6892,6 @@ paths: Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 - KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" @@ -7820,33 +7876,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -7945,7 +7975,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. 
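The stats hunk above adds the cgroups v2 variant of the `used_memory` formula (subtract `inactive_file` rather than `cache`). A minimal sketch of applying the documented formulas to a one-shot stats sample, assuming `jq` is available and using a hypothetical container name:

```bash
#!/bin/bash
# Compute used_memory and memory usage % from a single stats sample,
# preferring the cgroups v2 field (inactive_file) and falling back to
# the cgroups v1 field (cache), per the API description.
set -euo pipefail

stats=$(curl -s --unix-socket /var/run/docker.sock \
  "http://localhost/containers/mycontainer/stats?stream=false")

echo "$stats" | jq '
  .memory_stats as $m
  | ($m.stats.inactive_file // $m.stats.cache // 0) as $reclaimable
  | ($m.usage - $reclaimable) as $used
  | { used_memory: $used,
      available_memory: $m.limit,
      usage_percent: (($used / $m.limit) * 100) }'
```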
operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.40.yaml b/_vendor/github.com/moby/moby/api/docs/v1.40.yaml similarity index 98% rename from _vendor/github.com/moby/moby/docs/api/v1.40.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.40.yaml index 27414904597..240f6d3de8d 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.40.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.40.yaml @@ -81,7 +81,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -173,6 +172,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -200,6 +227,22 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -211,15 +254,11 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. - `npipe` a named pipe from the host into the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -340,22 +379,23 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. 
- - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -414,7 +454,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -727,7 +770,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -738,6 +783,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -751,6 +802,10 @@ definitions: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. type: "integer" format: "int64" Retries: @@ -2169,6 +2224,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -4039,6 +4098,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. @@ -4623,11 +4683,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -4678,8 +4738,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. 
+ Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: @@ -4868,7 +4928,9 @@ definitions: example: "linux" Arch: description: | - The architecture that the daemon is running on + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "amd64" KernelVersion: @@ -5135,10 +5197,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). type: "string" example: "x86_64" NCPU: @@ -6775,7 +6840,8 @@ paths: To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` @@ -7948,7 +8014,18 @@ paths: default: "" - name: "outputs" in: "query" - description: "BuildKit output configuration" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. 
+ + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` type: "string" default: "" - name: "version" @@ -8143,33 +8220,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -8268,7 +8319,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.41.yaml b/_vendor/github.com/moby/moby/api/docs/v1.41.yaml similarity index 98% rename from _vendor/github.com/moby/moby/docs/api/v1.41.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.41.yaml index 0554b5a719e..fbbc8b5f80d 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.41.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.41.yaml @@ -81,7 +81,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -173,6 +172,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -200,6 +227,22 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -211,15 +254,11 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. - `npipe` a named pipe from the host into the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. 
+ allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -340,22 +379,23 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must exist prior to creating the container. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -414,7 +454,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -754,7 +797,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -765,6 +810,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -778,6 +829,10 @@ definitions: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. type: "integer" format: "int64" Retries: @@ -2200,6 +2255,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -4204,6 +4263,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. 
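The `HealthConfig` changes repeated in this hunk ask for side-effect-free probes and spell out the exit-code contract (0 healthy, 1 unhealthy, 2 reserved, anything else a probe error). A minimal sketch of a probe script honoring that contract, assuming a hypothetical HTTP service exposing `/healthz` on port 8080 inside the container:

```bash
#!/bin/bash
# Example health probe for use with ["CMD-SHELL", "/usr/local/bin/healthcheck.sh"].
# Exit codes follow the documented contract: 0 = healthy, 1 = unhealthy
# (2 is reserved, other values are treated as probe errors).
# The check is read-only, so repeated runs have no side effects.
if curl -fsS --max-time 2 http://localhost:8080/healthz > /dev/null; then
  exit 0   # healthy
else
  exit 1   # unhealthy
fi
```

Since the description states the probe is forcibly terminated when `Timeout` elapses, keeping the probe itself short (the `--max-time 2` above) avoids relying on that hard kill.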
type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. @@ -4872,11 +4932,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -4927,8 +4987,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: @@ -5117,7 +5177,9 @@ definitions: example: "linux" Arch: description: | - The architecture that the daemon is running on + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "amd64" KernelVersion: @@ -5370,10 +5432,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). type: "string" example: "x86_64" NCPU: @@ -7057,7 +7122,8 @@ paths: To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` @@ -8237,7 +8303,18 @@ paths: default: "" - name: "outputs" in: "query" - description: "BuildKit output configuration" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. 
+ The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` type: "string" default: "" - name: "version" @@ -8432,33 +8509,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -8557,7 +8608,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.42.yaml b/_vendor/github.com/moby/moby/api/docs/v1.42.yaml similarity index 98% rename from _vendor/github.com/moby/moby/docs/api/v1.42.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.42.yaml index b31e84af5db..15f37f8e433 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.42.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.42.yaml @@ -81,7 +81,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -173,6 +172,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -200,6 +227,24 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -211,17 +256,12 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. 
- - `tmpfs` a `tmpfs`. + - `cluster` a Swarm cluster volume. - `npipe` a named pipe from the host into the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -342,24 +382,25 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -422,7 +463,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -756,7 +800,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -767,6 +813,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -780,6 +832,10 @@ definitions: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. 
+ + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. type: "integer" format: "int64" Retries: @@ -2203,6 +2259,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2616,14 +2676,6 @@ definitions: description: | Unique ID of the build cache record. example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. - - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" Parents: description: | List of parent build cache record IDs. @@ -4223,6 +4275,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. @@ -4891,11 +4944,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -4946,8 +4999,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: @@ -5156,7 +5209,9 @@ definitions: example: "linux" Arch: description: | - The architecture that the daemon is running on + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "amd64" KernelVersion: @@ -5400,10 +5455,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). 
+ Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). type: "string" example: "x86_64" NCPU: @@ -7276,7 +7334,8 @@ paths: To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` @@ -8487,7 +8546,18 @@ paths: default: "" - name: "outputs" in: "query" - description: "BuildKit output configuration" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` type: "string" default: "" - name: "version" @@ -8697,33 +8767,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -8822,7 +8866,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. 
operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.43.yaml b/_vendor/github.com/moby/moby/api/docs/v1.43.yaml similarity index 98% rename from _vendor/github.com/moby/moby/docs/api/v1.43.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.43.yaml index a1cdea0ea5e..4b5a3b1b111 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.43.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.43.yaml @@ -81,7 +81,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -173,6 +172,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -200,6 +227,24 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -211,17 +256,12 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. + - `cluster` a Swarm cluster volume. - `npipe` a named pipe from the host into the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -342,24 +382,25 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. 
The `Source` must exist prior to creating the container. - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -422,7 +463,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -756,7 +800,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -767,6 +813,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -780,6 +832,10 @@ definitions: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. type: "integer" format: "int64" Retries: @@ -2234,6 +2290,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2647,14 +2707,6 @@ definitions: description: | Unique ID of the build cache record. example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. - - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" Parents: description: | List of parent build cache record IDs. @@ -4254,6 +4306,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. @@ -4922,11 +4975,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. 
It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -4977,8 +5030,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: @@ -5188,7 +5241,9 @@ definitions: example: "linux" Arch: description: | - The architecture that the daemon is running on + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "amd64" KernelVersion: @@ -5432,10 +5487,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). type: "string" example: "x86_64" NCPU: @@ -7294,7 +7352,8 @@ paths: To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` @@ -8505,7 +8564,18 @@ paths: default: "" - name: "outputs" in: "query" - description: "BuildKit output configuration" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. 
+ + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` type: "string" default: "" - name: "version" @@ -8715,33 +8785,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -8840,7 +8884,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.44.yaml b/_vendor/github.com/moby/moby/api/docs/v1.44.yaml similarity index 98% rename from _vendor/github.com/moby/moby/docs/api/v1.44.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.44.yaml index 8e4e6121e62..f2239095dd2 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.44.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.44.yaml @@ -81,7 +81,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -173,6 +172,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -200,6 +227,24 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -211,17 +256,12 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. + - `cluster` a Swarm cluster volume. - `npipe` a named pipe from the host into the container. 
- - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -342,24 +382,25 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -432,7 +473,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -766,7 +810,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -777,6 +823,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -790,6 +842,10 @@ definitions: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. 
+ + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. type: "integer" format: "int64" Retries: @@ -2086,14 +2142,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: | - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 1239828 GraphDriver: $ref: "#/definitions/GraphDriverData" RootFS: @@ -2225,14 +2273,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: |- - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 172064416 Labels: description: "User-defined key/value metadata." type: "object" @@ -2261,6 +2301,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2674,14 +2718,6 @@ definitions: description: | Unique ID of the build cache record. example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. - - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" Parents: description: | List of parent build cache record IDs. @@ -4322,6 +4358,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. @@ -5037,11 +5074,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -5092,8 +5129,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). 
type: "string" Templating: @@ -5303,7 +5340,9 @@ definitions: example: "linux" Arch: description: | - The architecture that the daemon is running on + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "amd64" KernelVersion: @@ -5547,10 +5586,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). type: "string" example: "x86_64" NCPU: @@ -7450,7 +7492,8 @@ paths: To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` @@ -8662,7 +8705,18 @@ paths: default: "" - name: "outputs" in: "query" - description: "BuildKit output configuration" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` type: "string" default: "" - name: "version" @@ -8872,33 +8926,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -8997,7 +9025,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. 
operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.45.yaml b/_vendor/github.com/moby/moby/api/docs/v1.45.yaml similarity index 98% rename from _vendor/github.com/moby/moby/docs/api/v1.45.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.45.yaml index 56d346fea4c..5ac2e74e48a 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.45.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.45.yaml @@ -81,7 +81,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -173,6 +172,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -200,6 +227,24 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -211,17 +256,12 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. + - `cluster` a Swarm cluster volume. - `npipe` a named pipe from the host into the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -342,24 +382,25 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. 
The `Source` must exist prior to creating the container. - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -440,7 +481,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" RestartPolicy: @@ -774,7 +818,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -785,6 +831,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -798,6 +850,10 @@ definitions: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. type: "integer" format: "int64" Retries: @@ -2072,14 +2128,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: | - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 1239828 GraphDriver: $ref: "#/definitions/GraphDriverData" RootFS: @@ -2211,14 +2259,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: |- - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 172064416 Labels: description: "User-defined key/value metadata." type: "object" @@ -2247,6 +2287,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2660,14 +2704,6 @@ definitions: description: | Unique ID of the build cache record. example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. 
- - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" Parents: description: | List of parent build cache record IDs. @@ -4308,6 +4344,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. @@ -5023,11 +5060,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -5078,8 +5115,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: @@ -5289,7 +5326,9 @@ definitions: example: "linux" Arch: description: | - The architecture that the daemon is running on + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "amd64" KernelVersion: @@ -5533,10 +5572,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). 
type: "string" example: "x86_64" NCPU: @@ -7436,7 +7478,8 @@ paths: To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` @@ -8648,7 +8691,18 @@ paths: default: "" - name: "outputs" in: "query" - description: "BuildKit output configuration" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` type: "string" default: "" - name: "version" @@ -8858,33 +8912,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -8983,7 +9011,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. 
operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.46.yaml b/_vendor/github.com/moby/moby/api/docs/v1.46.yaml similarity index 98% rename from _vendor/github.com/moby/moby/docs/api/v1.46.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.46.yaml index 8c4be6c3ce0..6dc713141bc 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.46.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.46.yaml @@ -81,7 +81,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -173,6 +172,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -200,6 +227,24 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -211,17 +256,12 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. + - `cluster` a Swarm cluster volume. - `npipe` a named pipe from the host into the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -342,24 +382,25 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. 
The `Source` must exist prior to creating the container. - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -440,7 +481,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" Options: description: | @@ -789,7 +833,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -800,6 +846,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -813,6 +865,10 @@ definitions: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. type: "integer" format: "int64" Retries: @@ -1385,7 +1441,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" Domainname: @@ -1395,7 +1451,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" User: @@ -1409,7 +1465,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1420,7 +1476,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. 
type: "boolean" default: false example: false @@ -1431,7 +1487,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1458,7 +1514,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1469,7 +1525,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1480,7 +1536,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1517,7 +1573,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" @@ -1556,7 +1612,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1568,7 +1624,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" @@ -1602,7 +1658,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "integer" default: 10 x-nullable: true @@ -2099,14 +2155,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: | - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 1239828 GraphDriver: $ref: "#/definitions/GraphDriverData" RootFS: @@ -2239,14 +2287,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: |- - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 172064416 Labels: description: "User-defined key/value metadata." type: "object" @@ -2275,6 +2315,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. 
+ + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2706,14 +2750,6 @@ definitions: description: | Unique ID of the build cache record. example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. - - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" Parents: description: | List of parent build cache record IDs. @@ -4361,6 +4397,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. @@ -5082,11 +5119,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -5137,8 +5174,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: @@ -5348,7 +5385,9 @@ definitions: example: "linux" Arch: description: | - The architecture that the daemon is running on + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "amd64" KernelVersion: @@ -5592,10 +5631,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). 
type: "string" example: "x86_64" NCPU: @@ -7557,7 +7599,8 @@ paths: To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` @@ -8769,7 +8812,18 @@ paths: default: "" - name: "outputs" in: "query" - description: "BuildKit output configuration" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` type: "string" default: "" - name: "version" @@ -8979,33 +9033,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -9117,7 +9145,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. 
operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.47.yaml b/_vendor/github.com/moby/moby/api/docs/v1.47.yaml similarity index 98% rename from _vendor/github.com/moby/moby/docs/api/v1.47.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.47.yaml index 4eb222a0507..9608751d806 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.47.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.47.yaml @@ -81,7 +81,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -173,6 +172,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -200,6 +227,24 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -211,17 +256,12 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. + - `cluster` a Swarm cluster volume. - `npipe` a named pipe from the host into the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -342,24 +382,25 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. 
The `Source` must exist prior to creating the container. - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -440,7 +481,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" Options: description: | @@ -789,7 +833,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -800,6 +846,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -813,6 +865,10 @@ definitions: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. type: "integer" format: "int64" Retries: @@ -1385,7 +1441,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" Domainname: @@ -1395,7 +1451,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" User: @@ -1409,7 +1465,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1420,7 +1476,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. 
type: "boolean" default: false example: false @@ -1431,7 +1487,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1458,7 +1514,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1469,7 +1525,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1480,7 +1536,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1517,7 +1573,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" @@ -1556,7 +1612,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1568,7 +1624,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" @@ -1602,7 +1658,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "integer" default: 10 x-nullable: true @@ -2099,14 +2155,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: | - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 1239828 GraphDriver: $ref: "#/definitions/DriverData" RootFS: @@ -2239,14 +2287,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: |- - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 172064416 Labels: description: "User-defined key/value metadata." type: "object" @@ -2288,6 +2328,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. 
+ + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2724,14 +2768,6 @@ definitions: description: | Unique ID of the build cache record. example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. - - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" Parents: description: | List of parent build cache record IDs. @@ -4379,6 +4415,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. @@ -5100,11 +5137,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -5155,8 +5192,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: @@ -5366,7 +5403,9 @@ definitions: example: "linux" Arch: description: | - The architecture that the daemon is running on + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "amd64" KernelVersion: @@ -5614,10 +5653,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). 
type: "string" example: "x86_64" NCPU: @@ -7693,7 +7735,8 @@ paths: To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` @@ -8910,7 +8953,18 @@ paths: default: "" - name: "outputs" in: "query" - description: "BuildKit output configuration" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` type: "string" default: "" - name: "version" @@ -9120,33 +9174,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -9258,7 +9286,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. 
operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.48.yaml b/_vendor/github.com/moby/moby/api/docs/v1.48.yaml similarity index 98% rename from _vendor/github.com/moby/moby/docs/api/v1.48.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.48.yaml index a2901377e5b..353f85cf786 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.48.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.48.yaml @@ -81,7 +81,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -173,6 +172,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -200,6 +227,26 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `image` an OCI image. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "image" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -211,19 +258,13 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `image` a docker image - - `tmpfs` a `tmpfs`. + - `cluster` a Swarm cluster volume. + - `image` an OCI image. - `npipe` a named pipe from the host into the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "image" - - "tmpfs" - - "npipe" - - "cluster" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -344,26 +385,26 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `image` Mounts an image. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. 
- - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "image" - - "tmpfs" - - "npipe" - - "cluster" + - `image` Mounts an image. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -452,7 +493,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" Options: description: | @@ -801,7 +845,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -812,6 +858,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -825,6 +877,10 @@ definitions: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. type: "integer" format: "int64" Retries: @@ -1435,7 +1491,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" Domainname: @@ -1445,7 +1501,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" User: @@ -1459,7 +1515,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1470,7 +1526,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. 
It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1481,7 +1537,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1508,7 +1564,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1519,7 +1575,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1530,7 +1586,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1567,7 +1623,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" @@ -1606,7 +1662,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1618,7 +1674,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" @@ -1652,7 +1708,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "integer" default: 10 x-nullable: true @@ -2176,14 +2232,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: | - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 1239828 GraphDriver: $ref: "#/definitions/DriverData" RootFS: @@ -2316,14 +2364,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: |- - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 172064416 Labels: description: "User-defined key/value metadata." 
type: "object" @@ -2377,6 +2417,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2825,14 +2869,6 @@ definitions: description: | Unique ID of the build cache record. example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. - - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" Parents: description: | List of parent build cache record IDs. @@ -3039,7 +3075,8 @@ definitions: be used. If multiple endpoints have the same priority, endpoints are lexicographically sorted based on their network name, and the one that sorts first is picked. - type: "number" + type: "integer" + format: "int64" example: - 10 @@ -4517,6 +4554,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. @@ -5508,11 +5546,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -5563,8 +5601,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: @@ -5988,7 +6026,7 @@ definitions: type: "integer" format: "uint64" x-nullable: true - example: 18446744073709551615 + example: "18446744073709551615" ContainerThrottlingData: description: | @@ -6046,7 +6084,11 @@ definitions: example: 0 stats: description: | - All the stats exported via memory.stat. when using cgroups v2. + All the stats exported via memory.stat. + + The fields in this object differ between cgroups v1 and v2. + On cgroups v1, fields such as `cache`, `rss`, `mapped_file` are available. + On cgroups v2, fields such as `file`, `anon`, `inactive_file` are available. This field is Linux-specific and omitted for Windows containers. type: "object" @@ -6386,7 +6428,9 @@ definitions: example: "linux" Arch: description: | - The architecture that the daemon is running on + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). 
+ + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "amd64" KernelVersion: @@ -6546,7 +6590,7 @@ definitions: > **Deprecated**: netfilter module is now loaded on-demand and no longer > during daemon startup, making this field obsolete. This field is always - > `false` and will be removed in a API v1.49. + > `false` and will be removed in a API v1.50. type: "boolean" example: false BridgeNfIp6tables: @@ -6557,7 +6601,7 @@ definitions: > **Deprecated**: netfilter module is now loaded on-demand, and no longer > during daemon startup, making this field obsolete. This field is always - > `false` and will be removed in a API v1.49. + > `false` and will be removed in a API v1.50. type: "boolean" example: false Debug: @@ -6645,10 +6689,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). type: "string" example: "x86_64" NCPU: @@ -8336,7 +8383,8 @@ paths: To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` @@ -9454,7 +9502,18 @@ paths: default: "" - name: "outputs" in: "query" - description: "BuildKit output configuration" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` type: "string" default: "" - name: "version" @@ -9492,7 +9551,7 @@ paths: Amount of disk space in bytes to keep for cache > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space". - > It is kept for backward compatibility and will be removed in API v1.49. + > It is kept for backward compatibility and will be removed in API v1.52. 
type: "integer" format: "int64" - name: "reserved-space" @@ -9695,33 +9754,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -9847,7 +9880,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.49.yaml b/_vendor/github.com/moby/moby/api/docs/v1.49.yaml similarity index 98% rename from _vendor/github.com/moby/moby/docs/api/v1.49.yaml rename to _vendor/github.com/moby/moby/api/docs/v1.49.yaml index 1183aaf2b59..d361f9df76e 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.49.yaml +++ b/_vendor/github.com/moby/moby/api/docs/v1.49.yaml @@ -81,7 +81,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -173,6 +172,34 @@ tags: x-displayName: "System" definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false Port: type: "object" description: "An open port on a container" @@ -200,6 +227,26 @@ definitions: PublicPort: 80 Type: "tcp" + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `image` an OCI image. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "image" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + MountPoint: type: "object" description: | @@ -211,19 +258,13 @@ definitions: The mount type: - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `image` a docker image - - `tmpfs` a `tmpfs`. + - `cluster` a Swarm cluster volume. + - `image` an OCI image. - `npipe` a named pipe from the host into the container. 
- - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "image" - - "tmpfs" - - "npipe" - - "cluster" + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" example: "volume" Name: description: | @@ -344,26 +385,26 @@ definitions: description: "Container path." type: "string" Source: - description: "Mount source (e.g. a volume name, a host path)." + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `image` Mounts an image. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "image" - - "tmpfs" - - "npipe" - - "cluster" + - `image` Mounts an image. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + allOf: + - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -452,7 +493,10 @@ definitions: type: "integer" format: "int64" Mode: - description: "The permission mode for the tmpfs mount in an integer." + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). type: "integer" Options: description: | @@ -801,7 +845,9 @@ definitions: Value: "UUID2" HealthConfig: - description: "A test to perform to check that the container is healthy." + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. type: "object" properties: Test: @@ -812,6 +858,12 @@ definitions: - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe type: "array" items: type: "string" @@ -825,6 +877,10 @@ definitions: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. 
+ + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. type: "integer" format: "int64" Retries: @@ -1435,7 +1491,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" Domainname: @@ -1445,7 +1501,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" User: @@ -1459,7 +1515,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1470,7 +1526,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1481,7 +1537,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1508,7 +1564,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1519,7 +1575,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1530,7 +1586,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1567,7 +1623,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" @@ -1606,7 +1662,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1618,7 +1674,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always omitted. 
It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" @@ -1652,7 +1708,7 @@ definitions: <p><br /></p> > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "integer" default: 10 x-nullable: true @@ -2176,14 +2232,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: | - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 1239828 GraphDriver: $ref: "#/definitions/DriverData" RootFS: @@ -2316,14 +2364,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: |- - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 172064416 Labels: description: "User-defined key/value metadata." type: "object" @@ -2377,6 +2417,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -2825,14 +2869,6 @@ definitions: description: | Unique ID of the build cache record. example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. - - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" Parents: description: | List of parent build cache record IDs. @@ -3039,7 +3075,8 @@ definitions: be used. If multiple endpoints have the same priority, endpoints are lexicographically sorted based on their network name, and the one that sorts first is picked. - type: "number" + type: "integer" + format: "int64" example: - 10 @@ -4517,6 +4554,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. @@ -5508,11 +5546,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. 
@@ -5563,8 +5601,8 @@ definitions: type: "string" Data: description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: @@ -5988,7 +6026,7 @@ definitions: type: "integer" format: "uint64" x-nullable: true - example: 18446744073709551615 + example: "18446744073709551615" ContainerThrottlingData: description: | @@ -6046,7 +6084,11 @@ definitions: example: 0 stats: description: | - All the stats exported via memory.stat. when using cgroups v2. + All the stats exported via memory.stat. + + The fields in this object differ between cgroups v1 and v2. + On cgroups v1, fields such as `cache`, `rss`, `mapped_file` are available. + On cgroups v2, fields such as `file`, `anon`, `inactive_file` are available. This field is Linux-specific and omitted for Windows containers. type: "object" @@ -6386,7 +6428,9 @@ definitions: example: "linux" Arch: description: | - The architecture that the daemon is running on + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "amd64" KernelVersion: @@ -6546,7 +6590,7 @@ definitions: > **Deprecated**: netfilter module is now loaded on-demand and no longer > during daemon startup, making this field obsolete. This field is always - > `false` and will be removed in a API v1.49. + > `false` and will be removed in a API v1.50. type: "boolean" example: false BridgeNfIp6tables: @@ -6557,7 +6601,7 @@ definitions: > **Deprecated**: netfilter module is now loaded on-demand, and no longer > during daemon startup, making this field obsolete. This field is always - > `false` and will be removed in a API v1.49. + > `false` and will be removed in a API v1.50. type: "boolean" example: false Debug: @@ -6645,10 +6689,13 @@ definitions: example: "linux" Architecture: description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). 
type: "string" example: "x86_64" NCPU: @@ -8336,7 +8383,8 @@ paths: To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` @@ -9454,7 +9502,18 @@ paths: default: "" - name: "outputs" in: "query" - description: "BuildKit output configuration" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` type: "string" default: "" - name: "version" @@ -9492,7 +9551,7 @@ paths: Amount of disk space in bytes to keep for cache > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space". - > It is kept for backward compatibility and will be removed in API v1.49. + > It is kept for backward compatibility and will be removed in API v1.52. type: "integer" format: "int64" - name: "reserved-space" @@ -9678,10 +9737,31 @@ paths: required: true - name: "manifests" in: "query" - description: "Include Manifests in the image summary." + description: |- + Include Manifests in the image summary. + + The `manifests` and `platform` options are mutually exclusive, and + an error is produced if both are set. type: "boolean" default: false required: false + - name: "platform" + type: "string" + in: "query" + description: |- + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show inspect. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + The `platform` and `manifests` options are mutually exclusive, and + an error is produced if both are set. 
+ + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` tags: ["Image"] /images/{name}/history: get: @@ -9695,33 +9775,7 @@ paths: schema: type: "array" items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false + $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" @@ -9847,7 +9901,12 @@ paths: /images/{name}/tag: post: summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: diff --git a/_vendor/github.com/moby/moby/api/docs/v1.50.yaml b/_vendor/github.com/moby/moby/api/docs/v1.50.yaml new file mode 100644 index 00000000000..55bc1117793 --- /dev/null +++ b/_vendor/github.com/moby/moby/api/docs/v1.50.yaml @@ -0,0 +1,13436 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. + +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.50" +info: + title: "Docker Engine API" + version: "1.50" + x-logo: + url: "https://docs.docker.com/assets/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the + Docker client uses to communicate with the Engine, so everything the Docker + client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` + is `GET /containers/json`). The notable exception is running containers, + which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure + of the API call. The body of the response will be JSON in the following + format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, a HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.50) is used. 
+ For example, calling `/info` is the same as calling `/v1.50/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means the server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. These are sent as + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. + - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. 
+ - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `image` an OCI image. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "image" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `image` an OCI image. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. + type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. 
+ type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. + type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). + type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `cluster` a Swarm cluster volume + - `image` Mounts an image. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. 
+ allOf: + - $ref: "#/definitions/MountType" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to `true` in conjunction). + + Added in v1.44, before that version all read-only mounts were + non-recursive by default. To match the previous behaviour this + will default to `true` for clients on versions prior to v1.44. + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + Subpath: + description: "Source path inside the volume. Must be relative without any back traversals." + type: "string" + example: "dir-inside-volume/subdirectory" + ImageOptions: + description: "Optional configuration for the `image` type." + type: "object" + properties: + Subpath: + description: "Source path inside the image. Must be relative without any back traversals." + type: "string" + example: "dir-inside-image/subdirectory" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). + type: "integer" + Options: + description: | + The options to be passed to the tmpfs mount. An array of arrays. + Flag options should be provided as 1-length arrays. Other types + should be provided as as 2-length arrays, where the first item is + the key and the second the value. + type: "array" + items: + type: "array" + minItems: 1 + maxItems: 2 + items: + type: "string" + example: + [["noexec"]] + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server. 
+ type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container." 
+ type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + KernelMemoryTCP: + description: | + Hard limit for kernel TCP buffer memory (in bytes). Depending on the + OCI runtime in use, this option may be ignored. It is no longer supported + by the default (runc) runtime. + + This field is omitted when empty. + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. + type: "integer" + format: "int64" + MemorySwappiness: + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCpus: + description: "CPU quota in units of 10<sup>-9</sup> CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. + type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. 
+ type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. 
+ type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. + + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). 
+ This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + example: "" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + description: |- + Name of the logging driver used for the container or "none" + if logging is disabled. + type: "string" + enum: + - "local" + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + description: |- + Driver-specific configuration options for the logging driver. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "5" + "max-size": "10m" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:<name|id>`. Any + other value is taken as a custom network's name to which this + container should connect to. + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `<container name>[:<ro|rw>]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + Annotations: + type: "object" + description: | + Arbitrary non-identifying metadata attached to container and + provided to the runtime when the container is started. + additionalProperties: + type: "string" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." 
+ items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:<name|id>"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:<name|id>"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: |- + Gives the container full access to the host. + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. + + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. + ShmSize: + type: "integer" + format: "int64" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + x-nullable: true + description: |- + A list of kernel parameters (sysctls) to set in the container. + + This field is omitted if not set. 
+ additionalProperties: + type: "string" + example: + "net.ipv4.ip_forward": "1" + Runtime: + type: "string" + x-nullable: true + description: |- + Runtime to use with this container. + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`<user-name|UID>[<:group-name|GID>]`). + type: "string" + example: "123:456" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. 
+ type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + MacAddress: + description: | + MAC address of the container. + + Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. + type: "string" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + ImageConfig: + description: | + Configuration of the image. These fields are used as defaults + when starting a container from the image. + type: "object" + properties: + User: + description: "The user that commands are run as inside the container." + type: "string" + example: "web:web" + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: + "/app/data": {} + "/app/config": {} + WorkingDir: + description: "The working directory for commands to run in." 
+ type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: | + Name of the default bridge interface when dockerd's --bridge flag is set. + type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + + Deprecated: This field is never set and will be removed in a future release. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: | + IPv6 unicast address using the link-local prefix. + + Deprecated: This field is never set and will be removed in a future release. + type: "string" + example: "" + LinkLocalIPv6PrefixLen: + description: | + Prefix length of the IPv6 unicast address. + + Deprecated: This field is never set and will be removed in a future release. 
+ type: "integer" + example: "" + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey is the full path of the netns handle + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + SecondaryIPAddresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + SecondaryIPv6Addresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. 
This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "02:42:ac:11:00:04" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `<port>/<protocol>`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + x-nullable: true + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" + + DriverData: + description: | + Information about the storage driver used to store the container's and + image's filesystem. + type: "object" + required: [Name, Data] + properties: + Name: + description: "Name of the storage driver." + type: "string" + x-nullable: false + example: "overlay2" + Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific, and depends on the storage-driver + in use, and should be used for informational purposes only. 
+ type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } + + FilesystemChange: + description: | + Change in the container's filesystem. + type: "object" + required: [Path, Kind] + properties: + Path: + description: | + Path to file or directory that has changed. + type: "string" + x-nullable: false + Kind: + $ref: "#/definitions/ChangeType" + + ChangeType: + description: | + Kind of change + + Can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + + ImageInspect: + description: | + Information about an image in the local image cache. + type: "object" + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + Manifests: + description: | + Manifests is a list of image manifests available in this image. It + provides a more detailed view of the platform-specific image manifests or + other image-attached data like build attestations. + + Only available if the daemon provides a multi-platform image store + and the `manifests` option is set in the inspect request. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: true + items: + $ref: "#/definitions/ImageManifestSummary" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. 
+ type: "array" + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Parent: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + Comment: + description: | + Optional message that was set when committing or importing the image. + type: "string" + x-nullable: false + example: "" + Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if present in the image, + and omitted otherwise. + type: "string" + format: "dateTime" + x-nullable: true + example: "2022-02-04T21:20:12.497794809Z" + DockerVersion: + description: | + The version of Docker that was used to build the image. + + Depending on how the image was created, this field may be empty. + type: "string" + x-nullable: false + example: "27.0.1" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: false + example: "" + Config: + $ref: "#/definitions/ImageConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. + type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + GraphDriver: + $ref: "#/definitions/DriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. 
+ type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + + ImageSummary: + type: "object" + x-go-name: "Summary" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Created: + description: | + Date and time at which the image was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + x-nullable: false + example: "1644009612" + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 172064416 + SharedSize: + description: | + Total size of image layers that are shared between this image and other + images. + + This size is not calculated by default. `-1` indicates that the value + has not been set / calculated. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + Labels: + description: "User-defined key/value metadata." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Containers: + description: | + Number of containers using this image. Includes both stopped and running + containers. + + This size is not calculated by default, and depends on which API endpoint + is used. `-1` indicates that the value has not been set / calculated. 
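`ImageSummary` is the per-entry schema of the image list endpoint (`GET /images/json`). A sketch under the same assumptions:

```bash
# List local images (one ImageSummary per entry) and print tag plus size;
# images with an empty RepoTags list are shown as <untagged>.
curl -s --unix-socket /var/run/docker.sock \
  "http://localhost/v1.47/images/json" |
  jq -r '.[] | "\(.RepoTags[0] // "<untagged>")\t\(.Size)"'
```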
+ x-nullable: false + type: "integer" + example: 2 + Manifests: + description: | + Manifests is a list of manifests available in this image. + It provides a more detailed view of the platform-specific image manifests + or other image-attached data like build attestations. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: false + x-omitempty: true + items: + $ref: "#/definitions/ImageManifestSummary" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + example: "tardis" + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + example: "custom" + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + example: "/var/lib/docker/volumes/tardis" + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. 
This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + example: [] + + Network: + type: "object" + properties: + Name: + description: | + Name of the network. + type: "string" + example: "my_network" + Id: + description: | + ID that uniquely identifies a network on a single machine. + type: "string" + example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: + description: | + Date and time at which the network was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-10-19T04:33:30.360899459Z" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level) + type: "string" + example: "local" + Driver: + description: | + The name of the driver used to create the network (e.g. `bridge`, + `overlay`). + type: "string" + example: "overlay" + EnableIPv4: + description: | + Whether the network was created with IPv4 enabled. + type: "boolean" + example: true + EnableIPv6: + description: | + Whether the network was created with IPv6 enabled. + type: "boolean" + example: false + IPAM: + $ref: "#/definitions/IPAM" + Internal: + description: | + Whether the network is created to only allow internal networking + connectivity. + type: "boolean" + default: false + example: false + Attachable: + description: | + Whether a global / swarm scope network is manually attachable by regular + containers from workers in swarm mode. 
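The `Volume`, `VolumeCreateOptions`, and `VolumeListResponse` definitions map onto the volume create and list endpoints. A sketch under the same socket/version assumptions; the volume name and label are placeholders:

```bash
# Create a named volume (VolumeCreateOptions as the request body) ...
curl -s --unix-socket /var/run/docker.sock \
  -H "Content-Type: application/json" \
  -d '{"Name": "tardis", "Driver": "local", "Labels": {"com.example.some-label": "some-value"}}' \
  "http://localhost/v1.47/volumes/create" | jq .

# ... then list volumes (VolumeListResponse) and print their names.
curl -s --unix-socket /var/run/docker.sock \
  "http://localhost/v1.47/volumes" | jq -r '.Volumes[].Name'
```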
+ type: "boolean" + default: false + example: false + Ingress: + description: | + Whether the network is providing the routing-mesh for the swarm cluster. + type: "boolean" + default: false + example: false + ConfigFrom: + $ref: "#/definitions/ConfigReference" + ConfigOnly: + description: | + Whether the network is a config-only network. Config-only networks are + placeholder networks for network configurations to be used by other + networks. Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + Containers: + description: | + Contains endpoints attached to the network. + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + example: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + description: | + Network-specific options uses when creating the network. + type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Peers: + description: | + List of peer nodes for an overlay network. This field is only present + for overlay networks, and omitted for other network types. + type: "array" + items: + $ref: "#/definitions/PeerInfo" + x-nullable: true + # TODO: Add Services (only present when "verbose" is set). + + ConfigReference: + description: | + The config-only network source to provide the configuration for + this network. + type: "object" + properties: + Network: + description: | + The name of the config-only network that provides the network's + configuration. The specified network must be an existing config-only + network. Only network names are allowed, not network IDs. + type: "string" + example: "config_only_network_01" + + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + example: "default" + Config: + description: | + List of IPAM configuration options, specified as a map: + + ``` + {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} + ``` + type: "array" + items: + $ref: "#/definitions/IPAMConfig" + Options: + description: "Driver-specific options, specified as a map." 
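The `Network`, `ConfigReference`, and `IPAM` definitions above correspond to the network create and inspect endpoints. An illustrative sketch with placeholder network name, subnet, and gateway:

```bash
# Create a bridge network with an explicit IPAM config, then inspect it.
curl -s --unix-socket /var/run/docker.sock \
  -H "Content-Type: application/json" \
  -d '{
        "Name": "my_network",
        "Driver": "bridge",
        "IPAM": {
          "Driver": "default",
          "Config": [{"Subnet": "172.20.0.0/16", "Gateway": "172.20.0.1"}]
        }
      }' \
  "http://localhost/v1.47/networks/create" | jq .

curl -s --unix-socket /var/run/docker.sock \
  "http://localhost/v1.47/networks/my_network" | jq '{Name, Id, Driver, IPAM}'
```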
+ type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + example: "172.20.0.0/16" + IPRange: + type: "string" + example: "172.20.10.0/24" + Gateway: + type: "string" + example: "172.20.10.11" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + example: "container_1" + EndpointID: + type: "string" + example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: + type: "string" + example: "02:42:ac:13:00:02" + IPv4Address: + type: "string" + example: "172.19.0.2/16" + IPv6Address: + type: "string" + example: "" + + PeerInfo: + description: | + PeerInfo represents one peer of an overlay network. + type: "object" + properties: + Name: + description: + ID of the peer-node in the Swarm cluster. + type: "string" + example: "6869d7c1732b" + IP: + description: + IP-address of the peer-node in the Swarm cluster. + type: "string" + example: "10.133.77.91" + + NetworkCreateResponse: + description: "OK response to NetworkCreate operation" + type: "object" + title: "NetworkCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warning] + properties: + Id: + description: "The ID of the created network." + type: "string" + x-nullable: false + example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" + Warning: + description: "Warnings encountered when creating the container" + type: "string" + x-nullable: false + example: "" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + description: | + BuildCache contains information about a build cache record. + properties: + ID: + type: "string" + description: | + Unique ID of the build cache record. + example: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] + Type: + type: "string" + description: | + Cache record type. + example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. 
+ example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + + DeviceInfo: + type: "object" + description: | + DeviceInfo represents a device that can be used by a container. + properties: + Source: + type: "string" + example: "cdi" + description: | + The origin device driver. + ID: + type: "string" + example: "vendor.com/gpu=0" + description: | + The unique identifier for the device within its source driver. + For CDI devices, this would be an FQDN like "vendor.com/gpu=0". + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." 
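`BuildCache` records surface through the disk-usage endpoint rather than a dedicated listing. A sketch, same assumptions as the earlier examples:

```bash
# Summarize build cache records as reported by the disk-usage endpoint.
curl -s --unix-socket /var/run/docker.sock \
  "http://localhost/v1.47/system/df" |
  jq -r '.BuildCache[]? | "\(.Type)\t\(.Size)\t\(.Description)"'
```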
+ + IDResponse: + description: "Response to an API call that returns just an Id" + type: "object" + x-go-name: "IDResponse" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + MacAddress: + description: | + MAC address for the endpoint on this network. The network driver might ignore this parameter. + type: "string" + example: "02:42:ac:11:00:04" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + GwPriority: + description: | + This property determines which endpoint will provide the default + gateway for a container. The endpoint with the highest priority will + be used. If multiple endpoints have the same priority, endpoints are + lexicographically sorted based on their network name, and the one + that sorts first is picked. + type: "integer" + format: "int64" + example: + - 10 + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + DNSNames: + description: | + List of all DNS names an endpoint has on a specific network. This + list is based on the container name, network aliases, container short + ID, and hostname. + + These DNS names are non-fully qualified but can contain several dots. + You can get fully qualified DNS names by appending `.<network-name>`. + For instance, if container name is `my.ctr` and the network is named + `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be + `my.ctr.testnet`. + type: array + items: + type: string + example: ["foobar", "server_x", "server_y", "my.ctr"] + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. 
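`EndpointSettings` (and the `EndpointIPAMConfig` it embeds) is what a client supplies as `EndpointConfig` when connecting a container to a network. An illustrative sketch with placeholder container, network, and address values; the static IPv4 address only works on a network whose subnet actually contains it:

```bash
# Attach an existing container to a network, supplying EndpointSettings
# through the EndpointConfig field of the connect request.
curl -s --unix-socket /var/run/docker.sock \
  -H "Content-Type: application/json" \
  -d '{
        "Container": "my-container",
        "EndpointConfig": {
          "IPAMConfig": {"IPv4Address": "172.20.30.33"},
          "Aliases": ["server_x"]
        }
      }' \
  "http://localhost/v1.47/networks/my_network/connect"
```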
+ type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. + type: "object" + x-go-name: "PluginPrivilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." 
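The `Plugin*` definitions above describe entries returned by the plugin list endpoint. A minimal sketch under the same assumptions:

```bash
# List installed plugins with their name and enabled state.
curl -s --unix-socket /var/run/docker.sock \
  "http://localhost/v1.47/plugins" |
  jq -r '.[] | "\(.Name)\tenabled=\(.Enabled)"'
```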
+ type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." + enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. 
In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." 
+ type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. + type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." 
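`Node`, `NodeDescription`, `NodeStatus`, and `ManagerStatus` together make up the entries returned by the node list endpoint, which is only served by swarm managers. A sketch under the same assumptions:

```bash
# On a swarm manager: list nodes with hostname, role, availability, and state.
curl -s --unix-socket /var/run/docker.sock \
  "http://localhost/v1.47/nodes" |
  jq -r '.[] | "\(.Description.Hostname)\t\(.Spec.Role)\t\(.Spec.Availability)\t\(.Status.State)"'
```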
+ type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. + type: "string" + SigningCAKey: + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. 
+ type: "string" + ForceRotate: + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey` + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: | + If set, generate a key and use it to lock data stored on the + managers. + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selected log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. 
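`SwarmSpec`, `ClusterInfo`, and `JoinTokens` combine into the object returned by `GET /swarm` on a manager. A sketch; treat the worker token as a secret:

```bash
# On a manager: fetch the swarm object and show the cluster ID, creation
# time, and the worker join token.
curl -s --unix-socket /var/run/docker.sock \
  "http://localhost/v1.47/swarm" |
  jq '{ID, CreatedAt, WorkerToken: .JoinTokens.Worker}'
```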
+ type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + + <p><br /></p> + + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + + <p><br /></p> + + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + + <p><br /></p> + + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. 
+ + <p><br /></p> + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + + <p><br /></p> + + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." 
+ type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. The + secret in the reference will be identified by its ID. + type: "string" + OomScoreAdj: + type: "integer" + format: "int64" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 0 + Configs: + description: | + Configs contains references to zero or more configs that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + + <p><br /><p> + + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the + container but is used by the task + + <p><br /><p> + + > **Note**: `Configs.File` and `Configs.Runtime` are mutually + > exclusive + type: "object" + ConfigID: + description: | + ConfigID represents the ID of the specific config that we're + referencing. + type: "string" + ConfigName: + description: | + ConfigName is the name of the config that this references, + but this is just provided for lookup/display purposes. The + config in the reference will be identified by its ID. + type: "string" + Isolation: + type: "string" + description: | + Isolation technology of the containers running the service. + (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + Sysctls: + description: | + Set kernel namedspaced parameters (sysctls) in the container. + The Sysctls option on services accepts the same sysctls as the + are supported on containers. Note that while the same sysctls are + supported, no guarantees or checks are made about their + suitability for a clustered environment, and it's up to the user + to determine whether a given sysctl will work properly in a + Service. + type: "object" + additionalProperties: + type: "string" + # This option is not used by Windows containers + CapabilityAdd: + type: "array" + description: | + A list of kernel capabilities to add to the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + - "CAP_SYS_ADMIN" + - "CAP_SYS_CHROOT" + - "CAP_SYSLOG" + CapabilityDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + Ulimits: + description: | + A list of resource limits to set in the container. 
For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + NetworkAttachmentSpec: + description: | + Read-only spec type for non-swarm containers attached to swarm overlay + networks. + + <p><br /></p> + + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + type: "object" + properties: + ContainerID: + description: "ID of the container represented by this task" + type: "string" + Resources: + description: | + Resource requirements which apply to each individual container created + as part of the service. + type: "object" + properties: + Limits: + description: "Define resources limits." + $ref: "#/definitions/Limit" + Reservations: + description: "Define resources reservation." + $ref: "#/definitions/ResourceObject" + RestartPolicy: + description: | + Specification for the restart policy which applies to containers + created as part of this service. + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: | + Maximum attempts to restart a given container before giving up + (default value is 0, which is ignored). + type: "integer" + format: "int64" + default: 0 + Window: + description: | + Windows is the time window used to evaluate the restart policy + (default value is 0, which is unbounded). + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: | + An array of constraint expressions to limit the set of nodes where + a task can be scheduled. Constraint expressions can either use a + _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find + nodes that satisfy every expression (AND match). Constraints can + match node or Docker Engine labels as follows: + + node attribute | matches | example + ---------------------|--------------------------------|----------------------------------------------- + `node.id` | Node ID | `node.id==2ivku8v2gvtg4` + `node.hostname` | Node hostname | `node.hostname!=node-2` + `node.role` | Node role (`manager`/`worker`) | `node.role==manager` + `node.platform.os` | Node operating system | `node.platform.os==windows` + `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` + `node.labels` | User-defined node labels | `node.labels.security==high` + `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04` + + `engine.labels` apply to Docker Engine labels like operating system, + drivers, etc. Swarm administrators add `node.labels` for operational + purposes by using the [`node update endpoint`](#operation/NodeUpdate). + + type: "array" + items: + type: "string" + example: + - "node.hostname!=node3.corp.example.com" + - "node.role!=manager" + - "node.labels.type==production" + - "node.platform.os==linux" + - "node.platform.arch==x86_64" + Preferences: + description: | + Preferences provide a way to make the scheduler aware of factors + such as topology. They are provided in order from highest to + lowest precedence. 
+ type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: | + label descriptor, such as `engine.labels.az`. + type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: | + Maximum number of replicas for per node (default value is 0, which + is unlimited) + type: "integer" + format: "int64" + default: 0 + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: | + A counter that triggers an update even if no relevant parameters have + been changed. + type: "integer" + format: "uint64" + Runtime: + description: | + Runtime is the type of runtime specified for the task executor. + type: "string" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + LogDriver: + description: | + Specifies the log driver to use for tasks created from this spec. If + not present, the default one for the swarm will be used, finally + falling back to the engine default if not specified. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + ContainerStatus: + type: "object" + description: "represents the status of a container." + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + $ref: "#/definitions/ContainerStatus" + PortStatus: + $ref: "#/definitions/PortStatus" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." 
+ type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + $ref: "#/definitions/TaskStatus" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service. + $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." 
+ type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during an update before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling out an updated task. Either + the old task is shut down before the new task is started, or the + new task is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be rolled back in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: | + Amount of time between rollback iterations, in nanoseconds. + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an rolled back task fails to run, or stops + running during the rollback. + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: | + Amount of time to monitor each rolled back task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during a rollback before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling back a task. Either the old + task is shut down before the new task is started, or the new task + is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: | + Specifies which networks the service should attach to. + + Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + - "sctp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + PublishMode: + description: | + The mode in which port is published. + + <p><br /></p> + + - "ingress" makes the target port accessible on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publish the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: | + The mode of resolution to use for internal load balancing between tasks. 
+ type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + ServiceStatus: + description: | + The status of the service's tasks. Provided only when requested as + part of a ServiceList operation. + type: "object" + properties: + RunningTasks: + description: | + The number of tasks for the service currently in the Running state. + type: "integer" + format: "uint64" + example: 7 + DesiredTasks: + description: | + The number of tasks for the service desired to be running. + For replicated services, this is the replica count from the + service spec. For global services, this is computed by taking + count of all tasks for the service with a Desired State other + than Shutdown. + type: "integer" + format: "uint64" + example: 10 + CompletedTasks: + description: | + The number of tasks for a job that are in the Completed state. + This field must be cross-referenced with the service type, as the + value of 0 may mean the service is not in a job mode, or it may + mean the job-mode service has no tasks yet Completed. + type: "integer" + format: "uint64" + JobStatus: + description: | + The status of the service when it is in one of ReplicatedJob or + GlobalJob modes. Absent on Replicated and Global mode services. The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job. Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. 
+ type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + x-go-name: "DeleteResponse" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceCreateResponse: + type: "object" + description: | + contains the information returned to a client on the + creation of a new service. + properties: + ID: + description: "The ID of the created service." + type: "string" + x-nullable: false + example: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warnings: + description: | + Optional warning message. + + FIXME(thaJeztah): this should have "omitempty" in the generated type. + type: "array" + x-nullable: true + items: + type: "string" + example: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warnings: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerInspectResponse: + type: "object" + title: "ContainerInspectResponse" + x-go-name: "InspectResponse" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Created: + description: |- + Date and time at which the container was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2025-02-17T17:43:39.64001363Z" + Path: + description: |- + The path to the command being run + type: "string" + example: "/bin/sh" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + example: + - "-c" + - "exit 9" + State: + $ref: "#/definitions/ContainerState" + Image: + description: |- + The ID (digest) of the image that this container was created from. 
+ type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ResolvConfPath: + description: |- + Location of the `/etc/resolv.conf` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" + HostnamePath: + description: |- + Location of the `/etc/hostname` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" + HostsPath: + description: |- + Location of the `/etc/hosts` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" + LogPath: + description: |- + Location of the file used to buffer the container's logs. Depending on + the logging-driver used for the container, this field may be omitted. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + x-nullable: true + example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" + Name: + description: |- + The name associated with this container. + + For historic reasons, the name may be prefixed with a forward-slash (`/`). + type: "string" + example: "/funny_chatelet" + RestartCount: + description: |- + Number of times the container was restarted since it was created, + or since daemon was started. + type: "integer" + example: 0 + Driver: + description: |- + The storage-driver used for the container's filesystem (graph-driver + or snapshotter). + type: "string" + example: "overlayfs" + Platform: + description: |- + The platform (operating system) for which the container was created. + + This field was introduced for the experimental "LCOW" (Linux Containers + On Windows) features, which has been removed. In most cases, this field + is equal to the host's operating system (`linux` or `windows`). + type: "string" + example: "linux" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + description: |- + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + MountLabel: + description: |- + SELinux mount label set for the container. + type: "string" + example: "" + ProcessLabel: + description: |- + SELinux process label set for the container. + type: "string" + example: "" + AppArmorProfile: + description: |- + The AppArmor profile set for the container. + type: "string" + example: "" + ExecIDs: + description: |- + IDs of exec instances that are running in the container. 
+ type: "array" + items: + type: "string" + x-nullable: true + example: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/DriverData" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Mounts: + description: |- + List of mounts used by the container. + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + + ContainerSummary: + type: "object" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Names: + description: |- + The names associated with this container. Most containers have a single + name, but when using legacy "links", the container can have multiple + names. + + For historic reasons, names are prefixed with a forward-slash (`/`). + type: "array" + items: + type: "string" + example: + - "/funny_chatelet" + Image: + description: |- + The name or ID of the image used to create the container. + + This field shows the image reference as was specified when creating the container, + which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest` + or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`), + short form (e.g., `ubuntu:latest`)), or the ID(-prefix) of the image (e.g., `72297848456d`). + + The content of this field can be updated at runtime if the image used to + create the container is untagged, in which case the field is updated to + contain the the image ID (digest) it was resolved to in its canonical, + non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`). + type: "string" + example: "docker.io/library/ubuntu:latest" + ImageID: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + x-nullable: true + description: | + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + + This field is not populated in the `GET /system/df` endpoint. + Command: + description: "Command to run when starting the container" + type: "string" + example: "/bin/bash" + Created: + description: |- + Date and time at which the container was created as a Unix timestamp + (number of seconds since EPOCH). 
+ type: "integer" + format: "int64" + example: "1739811096" + Ports: + description: |- + Port-mappings for the container. + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + State: + description: | + The state of this container. + type: "string" + enum: + - "created" + - "running" + - "paused" + - "restarting" + - "exited" + - "removing" + - "dead" + example: "running" + Status: + description: |- + Additional human-readable status of this container (e.g. `Exit 0`) + type: "string" + example: "Up 4 days" + HostConfig: + type: "object" + description: |- + Summary of host-specific runtime information of the container. This + is a reduced set of information in the container's "HostConfig" as + available in the container "inspect" response. + properties: + NetworkMode: + description: |- + Networking mode (`host`, `none`, `container:<id>`) or name of the + primary network the container is using. + + This field is primarily for backward compatibility. The container + can be connected to multiple networks for which information can be + found in the `NetworkSettings.Networks` field, which enumerates + settings per network. + type: "string" + example: "mynetwork" + Annotations: + description: |- + Arbitrary key-value metadata attached to the container. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" + NetworkSettings: + description: |- + Summary of the container's network settings + type: "object" + properties: + Networks: + type: "object" + description: |- + Summary of network-settings for each network the container is + attached to. + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + description: |- + List of mounts used by the container. + items: + $ref: "#/definitions/MountPoint" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. 
+ + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether a process within this container has been killed because it ran + out of memory since the container was last started. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerUpdateResponse: + type: "object" + title: "ContainerUpdateResponse" + x-go-name: "UpdateResponse" + description: |- + Response for a successful container-update. + properties: + Warnings: + type: "array" + description: |- + Warnings encountered when updating the container. + items: + type: "string" + example: ["Published ports are discarded when using host network mode"] + + ContainerStatsResponse: + description: | + Statistics sample for a container. + type: "object" + x-go-name: "StatsResponse" + title: "ContainerStatsResponse" + properties: + name: + description: "Name of the container" + type: "string" + x-nullable: true + example: "boring_wozniak" + id: + description: "ID of the container" + type: "string" + x-nullable: true + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + read: + description: | + Date and time at which this sample was collected. + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:22.165243637Z" + preread: + description: | + Date and time at which this first sample was collected. This field + is not propagated if the "one-shot" option is set. If the "one-shot" + option is set, this field may be omitted, empty, or set to a default + date (`0001-01-01T00:00:00Z`). + + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:21.160452595Z" + pids_stats: + $ref: "#/definitions/ContainerPidsStats" + blkio_stats: + $ref: "#/definitions/ContainerBlkioStats" + num_procs: + description: | + The number of processors on the system. + + This field is Windows-specific and always zero for Linux containers. 
+ type: "integer" + format: "uint32" + example: 16 + storage_stats: + $ref: "#/definitions/ContainerStorageStats" + cpu_stats: + $ref: "#/definitions/ContainerCPUStats" + precpu_stats: + $ref: "#/definitions/ContainerCPUStats" + memory_stats: + $ref: "#/definitions/ContainerMemoryStats" + networks: + description: | + Network statistics for the container per interface. + + This field is omitted if the container has no networking enabled. + x-nullable: true + additionalProperties: + $ref: "#/definitions/ContainerNetworkStats" + example: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + + ContainerBlkioStats: + description: | + BlkioStats stores all IO service stats for data read and write. + + This type is Linux-specific and holds many fields that are specific to cgroups v1. + On a cgroup v2 host, all fields other than `io_service_bytes_recursive` + are omitted or `null`. + + This type is only populated on Linux and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStats" + x-nullable: true + properties: + io_service_bytes_recursive: + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_serviced_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_queue_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_service_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_wait_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_merged_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + sectors_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. 
+ x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + example: + io_service_bytes_recursive: [ + {"major": 254, "minor": 0, "op": "read", "value": 7593984}, + {"major": 254, "minor": 0, "op": "write", "value": 100} + ] + io_serviced_recursive: null + io_queue_recursive: null + io_service_time_recursive: null + io_wait_time_recursive: null + io_merged_recursive: null + io_time_recursive: null + sectors_recursive: null + + ContainerBlkioStatEntry: + description: | + Blkio stats entry. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStatEntry" + x-nullable: true + properties: + major: + type: "integer" + format: "uint64" + example: 254 + minor: + type: "integer" + format: "uint64" + example: 0 + op: + type: "string" + example: "read" + value: + type: "integer" + format: "uint64" + example: 7593984 + + ContainerCPUStats: + description: | + CPU related info of the container + type: "object" + x-go-name: "CPUStats" + x-nullable: true + properties: + cpu_usage: + $ref: "#/definitions/ContainerCPUUsage" + system_cpu_usage: + description: | + System Usage. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + online_cpus: + description: | + Number of online CPUs. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint32" + x-nullable: true + example: 5 + throttling_data: + $ref: "#/definitions/ContainerThrottlingData" + + ContainerCPUUsage: + description: | + All CPU stats aggregated since container inception. + type: "object" + x-go-name: "CPUUsage" + x-nullable: true + properties: + total_usage: + description: | + Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). + type: "integer" + format: "uint64" + example: 29912000 + percpu_usage: + description: | + Total CPU time (in nanoseconds) consumed per core (Linux). + + This field is Linux-specific when using cgroups v1. It is omitted + when using cgroups v2 and Windows containers. + type: "array" + x-nullable: true + items: + type: "integer" + format: "uint64" + example: 29912000 + + usage_in_kernelmode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 21994000 + usage_in_usermode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 7918000 + + ContainerPidsStats: + description: | + PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "PidsStats" + x-nullable: true + properties: + current: + description: | + Current is the number of PIDs in the cgroup. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + limit: + description: | + Limit is the hard limit on the number of pids in the cgroup. + A "Limit" of 0 means that there is no limit. 
+ type: "integer" + format: "uint64" + x-nullable: true + example: "18446744073709551615" + + ContainerThrottlingData: + description: | + CPU throttling stats of the container. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "ThrottlingData" + x-nullable: true + properties: + periods: + description: | + Number of periods with throttling active. + type: "integer" + format: "uint64" + example: 0 + throttled_periods: + description: | + Number of periods when the container hit its throttling limit. + type: "integer" + format: "uint64" + example: 0 + throttled_time: + description: | + Aggregated time (in nanoseconds) the container was throttled for. + type: "integer" + format: "uint64" + example: 0 + + ContainerMemoryStats: + description: | + Aggregates all memory stats since container inception on Linux. + Windows returns stats for commit and private working set only. + type: "object" + x-go-name: "MemoryStats" + properties: + usage: + description: | + Current `res_counter` usage for memory. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + max_usage: + description: | + Maximum usage ever recorded. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + stats: + description: | + All the stats exported via memory.stat. + + The fields in this object differ between cgroups v1 and v2. + On cgroups v1, fields such as `cache`, `rss`, `mapped_file` are available. + On cgroups v2, fields such as `file`, `anon`, `inactive_file` are available. + + This field is Linux-specific and omitted for Windows containers. + type: "object" + additionalProperties: + type: "integer" + format: "uint64" + x-nullable: true + example: + { + "active_anon": 1572864, + "active_file": 5115904, + "anon": 1572864, + "anon_thp": 0, + "file": 7626752, + "file_dirty": 0, + "file_mapped": 2723840, + "file_writeback": 0, + "inactive_anon": 0, + "inactive_file": 2510848, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 2042, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 45, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 1180928, + "slab_reclaimable": 725576, + "slab_unreclaimable": 455352, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 1, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + } + failcnt: + description: | + Number of times memory usage hits limits. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + limit: + description: | + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 8217579520 + commitbytes: + description: | + Committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + commitpeakbytes: + description: | + Peak committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + privateworkingset: + description: | + Private working set. 
+ + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + + ContainerNetworkStats: + description: | + Aggregates the network stats of one container + type: "object" + x-go-name: "NetworkStats" + x-nullable: true + properties: + rx_bytes: + description: | + Bytes received. Windows and Linux. + type: "integer" + format: "uint64" + example: 5338 + rx_packets: + description: | + Packets received. Windows and Linux. + type: "integer" + format: "uint64" + example: 36 + rx_errors: + description: | + Received errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + rx_dropped: + description: | + Incoming packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + tx_bytes: + description: | + Bytes sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 1200 + tx_packets: + description: | + Packets sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 12 + tx_errors: + description: | + Sent errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + tx_dropped: + description: | + Outgoing packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + endpoint_id: + description: | + Endpoint ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + instance_id: + description: | + Instance ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + + ContainerStorageStats: + description: | + StorageStats is the disk I/O stats for read/write on Windows. + + This type is Windows-specific and omitted for Linux containers. + type: "object" + x-go-name: "StorageStats" + x-nullable: true + properties: + read_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + read_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + + ContainerTopResponse: + type: "object" + x-go-name: "TopResponse" + title: "ContainerTopResponse" + description: |- + Container "top" response. + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + example: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + description: |- + Each process running in the container, where each process + is an array of values corresponding to the titles. 
+ type: "array" + items: + type: "array" + items: + type: "string" + example: + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "27.0.1" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "27.0.1" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.47" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.24" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.22.7" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "6.8.0-31-generic" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. 
+ + <p><br /></p> + + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + + <p><br /></p> + + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemoryTCP: + description: | + Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. + + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. 
+ type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. + type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information obtained from `uname`. On Windows this + information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd> + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "6.8.0-31-generic" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 24.04 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Ubuntu 24.04 LTS" + OSVersion: + description: | + Version of the host's operating system + + <p><br /></p> + + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "24.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. + + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. 
+ default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + + <p><br /></p> + + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + type: "string" + example: "27.0.1" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. 
+
+          If enabled, containers are kept running when the daemon is shut down
+          or upon daemon start if running containers are detected.
+        type: "boolean"
+        default: false
+        example: false
+      Isolation:
+        description: |
+          Represents the isolation technology to use as a default for containers.
+          The supported values are platform-specific.
+
+          If no isolation value is specified on daemon start, on Windows client,
+          the default is `hyperv`, and on Windows server, the default is `process`.
+
+          This option is currently not used on other platforms.
+        default: "default"
+        type: "string"
+        enum:
+          - "default"
+          - "hyperv"
+          - "process"
+          - ""
+      InitBinary:
+        description: |
+          Name and, optionally, path of the `docker-init` binary.
+
+          If the path is omitted, the daemon searches the host's `$PATH` for the
+          binary and uses the first result.
+        type: "string"
+        example: "docker-init"
+      ContainerdCommit:
+        $ref: "#/definitions/Commit"
+      RuncCommit:
+        $ref: "#/definitions/Commit"
+      InitCommit:
+        $ref: "#/definitions/Commit"
+      SecurityOptions:
+        description: |
+          List of security features that are enabled on the daemon, such as
+          apparmor, seccomp, SELinux, user-namespaces (userns), rootless and
+          no-new-privileges.
+
+          Additional configuration options for each security feature may
+          be present, and are included as a comma-separated list of key/value
+          pairs.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "name=apparmor"
+          - "name=seccomp,profile=default"
+          - "name=selinux"
+          - "name=userns"
+          - "name=rootless"
+      ProductLicense:
+        description: |
+          Reports a summary of the product license on the daemon.
+
+          If a commercial license has been applied to the daemon, information
+          such as the number of nodes and expiration date is included.
+        type: "string"
+        example: "Community Engine"
+      DefaultAddressPools:
+        description: |
+          List of custom default address pools for local networks, which can be
+          specified in the daemon.json file or via a dockerd option.
+
+          Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256
+          10.10.[0-255].0/24 address pools.
+        type: "array"
+        items:
+          type: "object"
+          properties:
+            Base:
+              description: "The network address in CIDR format"
+              type: "string"
+              example: "10.10.0.0/16"
+            Size:
+              description: "The network pool size"
+              type: "integer"
+              example: 24
+      FirewallBackend:
+        $ref: "#/definitions/FirewallInfo"
+      DiscoveredDevices:
+        description: |
+          List of devices discovered by device drivers.
+
+          Each device includes information about its source driver, kind, name,
+          and additional driver-specific attributes.
+        type: "array"
+        items:
+          $ref: "#/definitions/DeviceInfo"
+      Warnings:
+        description: |
+          List of warnings / informational messages about missing features, or
+          issues related to the daemon configuration.
+
+          These messages can be printed by the client as information to the user.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "WARNING: No memory limit support"
+      CDISpecDirs:
+        description: |
+          List of directories where CDI (Container Device Interface)
+          specifications are located.
+
+          These specifications define vendor-specific modifications to an OCI
+          runtime specification for a container being created.
+
+          An empty list indicates that CDI device injection is disabled.
+
+          Note that using CDI device injection requires the daemon to have
+          experimental features enabled. For non-experimental daemons, an empty
+          list is always returned.
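+
+    # Editorial example (not part of the upstream specification): the
+    # DefaultAddressPools field above reflects the daemon's
+    # `default-address-pools` configuration. A sketch of the corresponding
+    # daemon.json fragment for the documented Base/Size example; the key name
+    # is a daemon configuration option, not part of this API schema:
+    #
+    #   {
+    #     "default-address-pools": [
+    #       { "base": "10.10.0.0/16", "size": 24 }
+    #     ]
+    #   }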
+ type: "array" + items: + type: "string" + example: + - "/etc/cdi" + - "/var/run/cdi" + Containerd: + $ref: "#/definitions/ContainerdInfo" + + ContainerdInfo: + description: | + Information for connecting to the containerd instance that is used by the daemon. + This is included for debugging purposes only. + type: "object" + x-nullable: true + properties: + Address: + description: "The address of the containerd socket." + type: "string" + example: "/run/containerd/containerd.sock" + Namespaces: + description: | + The namespaces that the daemon uses for running containers and + plugins in containerd. These namespaces can be configured in the + daemon configuration, and are considered to be used exclusively + by the daemon, Tampering with the containerd instance may cause + unexpected behavior. + + As these namespaces are considered to be exclusively accessed + by the daemon, it is not recommended to change these values, + or to change them to a value that is used by other systems, + such as cri-containerd. + type: "object" + properties: + Containers: + description: | + The default containerd namespace used for containers managed + by the daemon. + + The default namespace for containers is "moby", but will be + suffixed with the `<uid>.<gid>` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "moby" + example: "moby" + Plugins: + description: | + The default containerd namespace used for plugins managed by + the daemon. + + The default namespace for plugins is "plugins.moby", but will be + suffixed with the `<uid>.<gid>` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "plugins.moby" + example: "plugins.moby" + + FirewallInfo: + description: | + Information about the daemon's firewalling configuration. + + This field is currently only used on Linux, and omitted on other platforms. + type: "object" + x-nullable: true + properties: + Driver: + description: | + The name of the firewall backend driver. + type: "string" + example: "nftables" + Info: + description: | + Information about the firewall backend, provided as + "label" / "value" pairs. + + <p><br /></p> + + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["ReloadedAt", "2025-01-01T00:00:00Z"] + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + + <p><br /></p> + + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." 
+        type: "array"
+        items:
+          type: "string"
+        example: ["img-authz-plugin", "hbm"]
+      Log:
+        description: "Names of available logging-drivers, and logging-driver plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"]
+
+
+  RegistryServiceConfig:
+    description: |
+      RegistryServiceConfig stores daemon registry services configuration.
+    type: "object"
+    x-nullable: true
+    properties:
+      InsecureRegistryCIDRs:
+        description: |
+          List of IP ranges of insecure registries, using the CIDR syntax
+          ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries
+          accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
+          from unknown CAs) communication.
+
+          By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as
+          insecure. All other registries are secure. Communicating with an
+          insecure registry is not possible if the daemon assumes that registry
+          is secure.
+
+          This configuration overrides this behavior, allowing insecure
+          communication with registries whose resolved IP address is within the
+          subnet described by the CIDR syntax.
+
+          Registries can also be marked insecure by hostname. Those registries
+          are listed under `IndexConfigs` and have their `Secure` field set to
+          `false`.
+
+          > **Warning**: Using this option can be useful when running a local
+          > registry, but introduces security vulnerabilities. This option
+          > should therefore ONLY be used for testing purposes. For increased
+          > security, users should add their CA to their system's list of trusted
+          > CAs instead of enabling this option.
+        type: "array"
+        items:
+          type: "string"
+        example: ["::1/128", "127.0.0.0/8"]
+      IndexConfigs:
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/IndexInfo"
+        example:
+          "127.0.0.1:5000":
+            "Name": "127.0.0.1:5000"
+            "Mirrors": []
+            "Secure": false
+            "Official": false
+          "[2001:db8:a0b:12f0::1]:80":
+            "Name": "[2001:db8:a0b:12f0::1]:80"
+            "Mirrors": []
+            "Secure": false
+            "Official": false
+          "docker.io":
+            Name: "docker.io"
+            Mirrors: ["https://hub-mirror.corp.example.com:5000/"]
+            Secure: true
+            Official: true
+          "registry.internal.corp.example.com:3000":
+            Name: "registry.internal.corp.example.com:3000"
+            Mirrors: []
+            Secure: false
+            Official: false
+      Mirrors:
+        description: |
+          List of registry URLs that act as a mirror for the official
+          (`docker.io`) registry.
+
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "https://hub-mirror.corp.example.com:5000/"
+          - "https://[2001:db8:a0b:12f0::1]/"
+
+  IndexInfo:
+    description:
+      IndexInfo contains information about a registry.
+    type: "object"
+    x-nullable: true
+    properties:
+      Name:
+        description: |
+          Name of the registry, such as "docker.io".
+        type: "string"
+        example: "docker.io"
+      Mirrors:
+        description: |
+          List of mirrors, expressed as URIs.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "https://hub-mirror.corp.example.com:5000/"
+          - "https://registry-2.docker.io/"
+          - "https://registry-3.docker.io/"
+      Secure:
+        description: |
+          Indicates if the registry is configured as a secure registry, that
+          is, not part of the list of insecure registries.
+
+          If `false`, the registry is insecure. Insecure registries accept
+          un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from
+          unknown CAs) communication.
+
+          > **Warning**: Insecure registries can be useful when running a local
+          > registry. However, because their use creates security vulnerabilities,
+          > they should ONLY be enabled for testing purposes.
For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. + type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + status: + description: | + Information specific to the runtime. + + While this API specification does not define data provided by runtimes, + the following well-known properties may be provided by runtimes: + + `org.opencontainers.runtime-spec.features`: features structure as defined + in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), + in a JSON string representation. + + <p><br /></p> + + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + + SwarmInfo: + description: | + Represents generic information about swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of ID's and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." 
+ type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. + type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.oci.image.manifest.v1+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 424 + urls: + description: |- + List of URLs from which this object MAY be downloaded. 
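+
+    # Editorial example (not part of the upstream specification): EventMessage
+    # and EventActor above describe the objects streamed by the `GET /events`
+    # endpoint (defined elsewhere in this specification), one JSON object per
+    # line. A sketch that streams container events only; the URL-encoded value
+    # is the filter `{"type":["container"]}`:
+    #
+    #   curl --silent --no-buffer --unix-socket /var/run/docker.sock \
+    #     "http://localhost/events?filters=%7B%22type%22%3A%5B%22container%22%5D%7D"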
+ type: "array" + items: + type: "string" + format: "uri" + x-nullable: true + annotations: + description: |- + Arbitrary metadata relating to the targeted content. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "com.docker.official-images.bashbrew.arch": "amd64" + "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" + "org.opencontainers.image.base.name": "scratch" + "org.opencontainers.image.created": "2025-01-27T00:00:00Z" + "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" + "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" + "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" + "org.opencontainers.image.version": "24.04" + data: + type: string + x-nullable: true + description: |- + Data is an embedding of the targeted content. This is encoded as a base64 + string when marshalled to JSON (automatically, by encoding/json). If + present, Data can be used directly to avoid fetching the targeted content. + example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. + type: "string" + x-nullable: true + example: null + + OCIPlatform: + type: "object" + x-go-name: Platform + x-nullable: true + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. 
+ Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. 
+ - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + + ImageManifestSummary: + x-go-name: "ManifestSummary" + description: | + ImageManifestSummary represents a summary of an image manifest. 
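+
+    # Editorial example (not part of the upstream specification): a
+    # ClusterVolumeSpec is supplied when creating a CSI volume through
+    # `POST /volumes/create` on a Swarm manager. A sketch, assuming a CSI
+    # plugin named "my-csi-plugin" is installed and that the volume-create
+    # request body accepts a ClusterVolumeSpec field as defined elsewhere in
+    # this specification:
+    #
+    #   curl --silent --unix-socket /var/run/docker.sock \
+    #     -H "Content-Type: application/json" \
+    #     -d '{"Name":"csi-vol","Driver":"my-csi-plugin","ClusterVolumeSpec":{"AccessMode":{"Scope":"single","Sharing":"none"}}}' \
+    #     http://localhost/volumes/create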
+ type: "object" + required: ["ID", "Descriptor", "Available", "Size", "Kind"] + properties: + ID: + description: | + ID is the content-addressable ID of an image and is the same as the + digest of the image manifest. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Available: + description: Indicates whether all the child content (image config, layers) is fully available locally. + type: "boolean" + example: true + Size: + type: "object" + x-nullable: false + required: ["Content", "Total"] + properties: + Total: + type: "integer" + format: "int64" + example: 8213251 + description: | + Total is the total size (in bytes) of all the locally present + data (both distributable and non-distributable) that's related to + this manifest and its children. + This equal to the sum of [Content] size AND all the sizes in the + [Size] struct present in the Kind-specific data struct. + For example, for an image kind (Kind == "image") + this would include the size of the image content and unpacked + image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Content: + description: | + Content is the size (in bytes) of all the locally present + content in the content store (e.g. image config, layers) + referenced by this manifest and its children. + This only includes blobs in the content store. + type: "integer" + format: "int64" + example: 3987495 + Kind: + type: "string" + example: "image" + enum: + - "image" + - "attestation" + - "unknown" + description: | + The kind of the manifest. + + kind | description + -------------|----------------------------------------------------------- + image | Image manifest that can be used to start a container. + attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. + ImageData: + description: | + The image data for the image manifest. + This field is only populated when Kind is "image". + type: "object" + x-nullable: true + x-omitempty: true + required: ["Platform", "Containers", "Size", "UnpackedSize"] + properties: + Platform: + $ref: "#/definitions/OCIPlatform" + description: | + OCI platform of the image. This will be the platform specified in the + manifest descriptor from the index/manifest list. + If it's not available, it will be obtained from the image config. + Containers: + description: | + The IDs of the containers that are using this image. + type: "array" + items: + type: "string" + example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] + Size: + type: "object" + x-nullable: false + required: ["Unpacked"] + properties: + Unpacked: + type: "integer" + format: "int64" + example: 3987495 + description: | + Unpacked is the size (in bytes) of the locally unpacked + (uncompressed) image content that's directly usable by the containers + running this image. + It's independent of the distributable content - e.g. + the image might still have an unpacked data that's still used by + some container even when the distributable/compressed content is + already gone. + AttestationData: + description: | + The image data for the attestation manifest. + This field is only populated when Kind is "attestation". + type: "object" + x-nullable: true + x-omitempty: true + required: ["For"] + properties: + For: + description: | + The digest of the image manifest that this attestation is for. 
+ type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated . + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers. + + Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. 
However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example; + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + required: true + responses: + 201: + description: "Container created successfully" + schema: + $ref: "#/definitions/ContainerCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + 
get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerInspectResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerTopResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. + schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + $ref: "#/definitions/FilesystemChange" + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
+ + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerStatsResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-<value>` where `<value>` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
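+
+    # Editorial example (not part of the upstream specification): applying the
+    # CPU-usage formula from the ContainerStats description above to a single
+    # sample. With `stream=false` (and without `one-shot`) the daemon waits for
+    # two cycles, so `precpu_stats` is populated and the deltas are meaningful.
+    # The container name "demo" is a placeholder:
+    #
+    #   curl --silent --unix-socket /var/run/docker.sock \
+    #     "http://localhost/containers/demo/stats?stream=false" | jq '
+    #       (.cpu_stats.cpu_usage.total_usage - .precpu_stats.cpu_usage.total_usage) as $cpu_delta
+    #       | (.cpu_stats.system_cpu_usage - .precpu_stats.system_cpu_usage) as $system_delta
+    #       | ($cpu_delta / $system_delta)
+    #         * (.cpu_stats.online_cpus // (.cpu_stats.cpu_usage.percpu_usage | length))
+    #         * 100.0'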
+ type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: | + Send a POSIX signal to a container, defaulting to killing to the + container. + operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." 
+ schema: + $ref: "#/definitions/ContainerUpdateResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. 
+ + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connected is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian. + + Following the header is the payload, which is the specified number of + bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream is not multiplexed. The data exchanged over the hijacked + connection is simply the raw data from the process PTY and client's + `stdin`. 
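To make the hijacking and stream-format discussion above concrete, here is a minimal, illustrative attach call over the daemon's Unix socket. It is not part of the specification; the socket path and container name are placeholders. Whether the output is raw or carries the 8-byte multiplexed frame headers depends on the container's TTY setting, as described above.

```bash
# Illustrative only: replay existing logs, then keep streaming new output.
# If the container was created without a TTY, the bytes include the
# 8-byte frame headers described above, so expect some binary framing.
curl -s --no-buffer -X POST \
  --unix-socket /var/run/docker.sock \
  "http://localhost/containers/web/attach?logs=1&stream=1&stdout=1&stderr=1" \
  --output -
```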
+ + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards. + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." 
+ operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64 - encoded JSON object with some filesystem header information + about the path. + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64 - encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." 
+ operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. + type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: + + - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`<image-name>[:<tag>]`) + - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `until=<timestamp>` + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. 
If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. 
Supported + standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` + type: "string" + default: "" + - name: "version" + in: "query" + type: "string" + default: "1" + enum: ["1", "2"] + description: | + Version of the builder backend to use. + + - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) + - `2` is [BuildKit](https://github.com/moby/buildkit) + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: | + Amount of disk space in bytes to keep for cache + + > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space". + > It is kept for backward compatibility and will be removed in API v1.52. 
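As a hedged illustration of the `/build` endpoint described above: the build context is streamed as a tar archive in the request body, while options such as `t`, `dockerfile`, and `buildargs` travel as query parameters. The image name and build arg below are placeholders, and the `buildargs` value is the URL-encoded form of `{"FOO":"bar"}`.

```bash
# Illustrative only: build the current directory as the context.
# gzip compression of the context is allowed per the inputStream description.
tar -czf - . | curl -s --no-buffer -X POST \
  --unix-socket /var/run/docker.sock \
  -H "Content-Type: application/x-tar" \
  --data-binary @- \
  "http://localhost/build?t=myimage:latest&buildargs=%7B%22FOO%22%3A%22bar%22%7D"
```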
+ type: "integer" + format: "int64" + - name: "reserved-space" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "max-used-space" + in: "query" + description: "Maximum amount of disk space allowed to keep for cache" + type: "integer" + format: "int64" + - name: "min-free-space" + in: "query" + description: "Target amount of free disk space after pruning" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=<timestamp>` remove cache older than `<timestamp>`. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. + - `id=<id>` + - `parent=<id>` + - `type=<string>` + - `description=<string>` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Pull or import an image." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: | + Name of the image to pull. If the name includes a tag or digest, specific behavior applies: + + - If only `fromImage` includes a tag, that tag is used. + - If both `fromImage` and `tag` are provided, `tag` takes precedence. + - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. + - If neither a tag nor digest is specified, all tags are pulled. + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. 
+ + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. + If the given image does exists in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/ImageInspect" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + - name: "manifests" + in: "query" + description: |- + Include Manifests in the image summary. + + The `manifests` and `platform` options are mutually exclusive, and + an error is produced if both are set. + type: "boolean" + default: false + required: false + - name: "platform" + type: "string" + in: "query" + description: |- + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show inspect. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + The `platform` and `manifests` options are mutually exclusive, and + an error is produced if both are set. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
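The `/images/create` endpoint above covers both pulls and imports; for a pull, `fromImage` and `tag` select the image and progress messages are streamed back as JSON. An illustrative sketch with placeholder names:

```bash
# Illustrative only: pull alpine:latest and stream progress messages.
curl -s --no-buffer -X POST \
  --unix-socket /var/run/docker.sock \
  "http://localhost/images/create?fromImage=alpine&tag=latest"

# For a private registry, credentials go in the X-Registry-Auth header
# (base64url-encoded JSON), as described in the authentication section.
```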
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + $ref: "#/definitions/ImageHistoryResponseItem" + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show the history for. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + Name of the image to push. For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. + type: "string" + required: true + - name: "tag" + in: "query" + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. 
+ + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + - name: "platforms" + in: "query" + description: | + Select platform-specific content to delete. + Multiple values are accepted. + Each platform is a OCI platform encoded as a JSON string. + type: "array" + items: + # This should be OCIPlatform + # but $ref is not supported for array in query in Swagger 2.0 + # $ref: "#/definitions/OCIPlatform" + type: "string" + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." 
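Tagging for a registry and then pushing (both defined above) is a two-step flow: the tag creates a registry-qualified reference, and the push requires an `X-Registry-Auth` header. A hedged sketch; the registry, repository, and credentials are placeholders, and `base64 -w0` is the GNU flag for unwrapped output.

```bash
# Illustrative only.
# 1. Create a registry-qualified tag for a local image.
curl -s -o /dev/null -w "%{http_code}\n" -X POST \
  --unix-socket /var/run/docker.sock \
  "http://localhost/images/alpine/tag?repo=registry.example.com/myalpine&tag=v1"

# 2. Push that tag. X-Registry-Auth is required; plain base64 usually
#    suffices because the payload rarely contains characters that differ
#    between base64 and base64url.
AUTH=$(printf '{"username":"user","password":"secret","serveraddress":"registry.example.com"}' | base64 -w0)
curl -s --no-buffer -X POST \
  --unix-socket /var/run/docker.sock \
  -H "X-Registry-Auth: $AUTH" \
  "http://localhost/images/registry.example.com/myalpine/push?tag=v1"
```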
+ operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + description: | + Whether this repository has automated builds enabled. + + <p><br /></p> + + > **Deprecated**: This field is deprecated and will always be "false". + type: "boolean" + example: false + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true + is_automated: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true + is_automated: false + name: "Busybox base image." + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true + is_automated: false + name: "postgres" + star_count: 12408 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-official=(true|false)` + - `stars=<number>` Matches images that has at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." 
+ schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
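The ping and version endpoints above are the usual first calls when probing a daemon: `_ping` confirms reachability and advertises the maximum supported API version in a response header, while `/version` returns the same information as JSON. Illustrative sketch only; the socket path is a placeholder and jq is assumed.

```bash
# Illustrative only.
curl -s --unix-socket /var/run/docker.sock http://localhost/_ping ; echo

# Maximum API version supported by the daemon, from the response headers.
curl -sI --unix-socket /var/run/docker.sock http://localhost/_ping | grep -i '^Api-Version:'

# Same information as JSON.
curl -s --unix-socket /var/run/docker.sock http://localhost/version | jq -r .ApiVersion
```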
+ operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith <hannibal@a-team.com>`)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=<string>` config name or ID + - `container=<string>` container name or ID + - `daemon=<string>` daemon name or ID + - `event=<string>` event type + - `image=<string>` image name or ID + - `label=<string>` image or container label + - `network=<string>` network name or ID + - `node=<string>` node ID + - `plugin`=<string> plugin name or ID + - `scope`=<string> local or swarm + - `secret=<string>` secret name or ID + - `service=<string>` service name or ID + - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=<string>` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: 
"sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: "2021-07-07T22:02:32.773909517Z" + UsageCount: 26 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). + + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
+ + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be load if the image is + multi-platform. + If not provided, the full multi-platform image will be loaded. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." 
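Exec is a two-step flow: the endpoint defined here creates an exec instance inside a running container (its configuration fields follow below), and the returned ID is then passed to `/exec/{id}/start`. A hedged sketch, with a placeholder container name and jq assumed:

```bash
# Illustrative only.
CID=web

# 1. Create the exec instance and capture its ID.
EXEC_ID=$(curl -s -X POST \
  --unix-socket /var/run/docker.sock \
  -H "Content-Type: application/json" \
  -d '{"AttachStdout":true,"AttachStderr":true,"Cmd":["date"]}' \
  "http://localhost/containers/$CID/exec" | jq -r .Id)

# 2. Start it; with Detach false the command's output is streamed back.
curl -s --no-buffer -X POST \
  --unix-socket /var/run/docker.sock \
  -H "Content-Type: application/json" \
  -d '{"Detach":false,"Tty":false}' \
  "http://localhost/exec/$EXEC_ID/start"
```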
+ operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + title: "ExecConfig" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-<value>` where `<value>` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container. + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + example: false + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: true + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." 
+ x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." + examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + $ref: "#/definitions/VolumeListResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=<volume-driver-name>` Matches volumes based on their driver. 
+ - `label=<key>` or `label=<key>:<value>` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=<volume-name>` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + $ref: "#/definitions/VolumeCreateOptions" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + put: + summary: | + "Update a volume. Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." 
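+      # Illustrative request (YAML comment only; the volume name is hypothetical):
+      #   DELETE /volumes/my-volume?force=true
+      # To remove all unused volumes in one call, see `POST /volumes/prune` below.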
+ operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than + inspecting a single network. For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. 
+ operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv4: true + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=<driver-name>` Matches a network's driver. + - `id=<network-id>` Matches all or part of a network ID. + - `label=<key>` or `label=<key>=<value>` of a network label. + - `name=<network-name>` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "Network created successfully" + schema: + $ref: "#/definitions/NetworkCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + example: "my_network" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + example: "bridge" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level). + type: "string" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + example: true + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + example: false + ConfigOnly: + description: | + Creates a config-only network. Config-only networks are placeholder + networks for network configurations to be used by other networks. + Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + example: false + ConfigFrom: + description: | + Specifies the source which will provide the configuration for + this network. 
The specified network must be an existing + config-only network; see ConfigOnly. + $ref: "#/definitions/ConfigReference" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv4: + description: "Enable IPv4 on the network." + type: "boolean" + example: true + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + example: true + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Operation forbidden" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkConnectRequest" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + MacAddress: "02:42:ac:12:05:02" + Priority: 100 + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkDisconnectRequest" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. 
+ tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=<capability name>` + - `enable=<true>|<false>` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=<node id>` + - `label=<engine label>` + - `membership=`(`accepted`|`pending`)` + - `name=<node name>` + - `node.label=<node label>` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes. 
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + `<ip|interface>`), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + `<ip|interface>`), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathAddr: "192.168.1.1" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=<service id>` + - `label=<service label>` + - `mode=["replicated"|"global"]` + - `name=<service name>` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/ServiceCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + OomScoreAdj: 0 + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
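+      # Illustrative flow (YAML comment only; the service name and version index
+      # are hypothetical): the object version returned by this endpoint is what
+      # the update endpoint below expects in its `version` query parameter:
+      #   GET  /services/web                      -> "Version": {"Index": 173}
+      #   POST /services/web/update?version=173   (body: the modified ServiceSpec)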
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + OomScoreAdj: 0 + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: 
"60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=<task id>` + - `label=key` or `label="key=value"` + - `name=<task name>` + - `node=<node id or name>` + - `service=<service name>` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=<secret id>` + - `label=<key> or label=<key>=value` + - `name=<secret name>` + - `names=<secret name>` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. 
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=<config id>` + - `label=<key> or label=<key>=value` + - `name=<config name>` + - `names=<config name>` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 
404:
+ description: "no such config"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the config"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/ConfigSpec"
+ description: |
+ The spec of the config to update. Currently, only the Labels field
+ can be updated. All other fields must remain unchanged from the
+ [ConfigInspect endpoint](#operation/ConfigInspect) response values.
+ - name: "version"
+ in: "query"
+ description: |
+ The version number of the config object being updated. This is
+ required to avoid conflicting writes.
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Config"]
+ /distribution/{name}/json:
+ get:
+ summary: "Get image information from the registry"
+ description: |
+ Return image digest and platform information by contacting the registry.
+ operationId: "DistributionInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "descriptor and platform information"
+ schema:
+ $ref: "#/definitions/DistributionInspect"
+ 401:
+ description: "Failed authentication or no image found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: someimage (tag: latest)"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or id"
+ type: "string"
+ required: true
+ tags: ["Distribution"]
+ /session:
+ post:
+ summary: "Initialize interactive session"
+ description: |
+ Start a new interactive session with a server. Session allows the server to
+ call back to the client for advanced capabilities.
+
+ ### Hijacking
+
+ This endpoint hijacks the HTTP connection to HTTP2 transport that allows
+ the client to expose gRPC services on that connection.
+
+ For example, the client sends this request to upgrade the connection:
+
+ ```
+ POST /session HTTP/1.1
+ Upgrade: h2c
+ Connection: Upgrade
+ ```
+
+ The Docker daemon responds with a `101 UPGRADED` response followed by
+ the raw stream:
+
+ ```
+ HTTP/1.1 101 UPGRADED
+ Connection: Upgrade
+ Upgrade: h2c
+ ```
+ operationId: "Session"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ responses:
+ 101:
+ description: "no error, hijacking successful"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Session"]
diff --git a/_vendor/github.com/moby/moby/api/docs/v1.51.yaml b/_vendor/github.com/moby/moby/api/docs/v1.51.yaml
new file mode 100644
index 00000000000..4213460ad37
--- /dev/null
+++ b/_vendor/github.com/moby/moby/api/docs/v1.51.yaml
@@ -0,0 +1,13457 @@
+# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
+#
+# This is used for generating API documentation and the types used by the
+# client/server. See api/README.md for more information.
+#
+# Some style notes:
+# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
+# descriptions.
+# - There is no maximum line length, for ease of editing and pretty diffs.
+# - operationIds are in the format "NounVerb", with a singular noun.
+
+swagger: "2.0"
+schemes:
+ - "http"
+ - "https"
+produces:
+ - "application/json"
+ - "text/plain"
+consumes:
+ - "application/json"
+ - "text/plain"
+basePath: "/v1.51"
+info:
+ title: "Docker Engine API"
+ version: "1.51"
+ x-logo:
+ url: "https://docs.docker.com/assets/images/logo-docker-main.png"
+ description: |
+ The Engine API is an HTTP API served by Docker Engine. It is the API the
+ Docker client uses to communicate with the Engine, so everything the Docker
+ client can do can be done with the API.
+
+ Most of the client's commands map directly to API endpoints (e.g. `docker ps`
+ is `GET /containers/json`). The notable exception is running containers,
+ which consists of several API calls.
+
+ # Errors
+
+ The API uses standard HTTP status codes to indicate the success or failure
+ of the API call. The body of the response will be JSON in the following
+ format:
+
+ ```
+ {
+ "message": "page not found"
+ }
+ ```
+
+ # Versioning
+
+ The API is usually changed in each release, so API calls are versioned to
+ ensure that clients don't break. To lock to a specific version of the API,
+ you prefix the URL with its version, for example, call `/v1.30/info` to use
+ the v1.30 version of the `/info` endpoint. If the API version specified in
+ the URL is not supported by the daemon, an HTTP `400 Bad Request` error message
+ is returned.
+
+ If you omit the version-prefix, the current version of the API (v1.51) is used.
+ For example, calling `/info` is the same as calling `/v1.51/info`. Using the
+ API without a version-prefix is deprecated and will be removed in a future release.
+
+ Engine releases in the near future should support this version of the API,
+ so your client will continue to work even if it is talking to a newer Engine.
+
+ The API uses an open schema model, which means the server may add extra properties
+ to responses. Likewise, the server will ignore any extra query parameters and
+ request body properties. When you write clients, you need to ignore additional
+ properties in responses to ensure they do not break when talking to newer
+ daemons.
+
+
+ # Authentication
+
+ Authentication for registries is handled client side. The client has to send
+ authentication details to various endpoints that need to communicate with
+ registries, such as `POST /images/(name)/push`. These are sent as
+ `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5)
+ (JSON) string with the following structure:
+
+ ```
+ {
+ "username": "string",
+ "password": "string",
+ "serveraddress": "string"
+ }
+ ```
+
+ The `serveraddress` is a domain/IP without a protocol. Throughout this
+ structure, double quotes are required.
+
+ If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth),
+ you can just pass this instead of credentials:
+
+ ```
+ {
+ "identitytoken": "9cbaf023786cd7..."
+ }
+ ```
+
+# The tags on paths define the menu sections in the ReDoc documentation, so
+# the usage of tags must make sense for that:
+# - They should be singular, not plural.
+# - There should not be too many tags, or the menu becomes unwieldy. For
+# example, it is preferable to add a path to the "System" tag instead of
+# creating a tag with a single path in it.
+# - The order of tags in this list defines the order in the menu.
+tags:
+ # Primary objects
+ - name: "Container"
+ x-displayName: "Containers"
+ description: |
+ Create and manage containers.
+ - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `image` an OCI image. 
+ - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "image" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `image` an OCI image. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. + type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. + type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. + type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). + type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. 
+ type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `cluster` a Swarm cluster volume + - `image` Mounts an image. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + allOf: + - $ref: "#/definitions/MountType" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to `true` in conjunction). + + Added in v1.44, before that version all read-only mounts were + non-recursive by default. To match the previous behaviour this + will default to `true` for clients on versions prior to v1.44. + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + Subpath: + description: "Source path inside the volume. Must be relative without any back traversals." 
+ type: "string"
+ example: "dir-inside-volume/subdirectory"
+ ImageOptions:
+ description: "Optional configuration for the `image` type."
+ type: "object"
+ properties:
+ Subpath:
+ description: "Source path inside the image. Must be relative without any back traversals."
+ type: "string"
+ example: "dir-inside-image/subdirectory"
+ TmpfsOptions:
+ description: "Optional configuration for the `tmpfs` type."
+ type: "object"
+ properties:
+ SizeBytes:
+ description: "The size for the tmpfs mount in bytes."
+ type: "integer"
+ format: "int64"
+ Mode:
+ description: |
+ The permission mode for the tmpfs mount as an integer.
+ The value must not be in octal format (e.g. 755) but rather
+ the decimal representation of the octal value (e.g. 493).
+ type: "integer"
+ Options:
+ description: |
+ The options to be passed to the tmpfs mount. An array of arrays.
+ Flag options should be provided as 1-length arrays. Other types
+ should be provided as 2-length arrays, where the first item is
+ the key and the second the value.
+ type: "array"
+ items:
+ type: "array"
+ minItems: 1
+ maxItems: 2
+ items:
+ type: "string"
+ example:
+ [["noexec"]]
+
+ RestartPolicy:
+ description: |
+ The behavior to apply when the container exits. The default is not to
+ restart.
+
+ An ever increasing delay (double the previous delay, starting at 100ms) is
+ added before each restart to prevent flooding the server.
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ description: |
+ - Empty string means not to restart
+ - `no` Do not automatically restart
+ - `always` Always restart
+ - `unless-stopped` Restart always except when the user has manually stopped the container
+ - `on-failure` Restart only when the container exit code is non-zero
+ enum:
+ - ""
+ - "no"
+ - "always"
+ - "unless-stopped"
+ - "on-failure"
+ MaximumRetryCount:
+ type: "integer"
+ description: |
+ If `on-failure` is used, the number of times to retry before giving up.
+
+ Resources:
+ description: "A container's resources (cgroups config, ulimits, etc)"
+ type: "object"
+ properties:
+ # Applicable to all platforms
+ CpuShares:
+ description: |
+ An integer value representing this container's relative CPU weight
+ versus other containers.
+ type: "integer"
+ Memory:
+ description: "Memory limit in bytes."
+ type: "integer"
+ format: "int64"
+ default: 0
+ # Applicable to UNIX platforms
+ CgroupParent:
+ description: |
+ Path to `cgroups` under which the container's `cgroup` is created. If
+ the path is not absolute, the path is considered to be relative to the
+ `cgroups` path of the init process. Cgroups are created if they do not
+ already exist.
+ type: "string"
+ BlkioWeight:
+ description: "Block IO weight (relative weight)."
+ type: "integer"
+ minimum: 0
+ maximum: 1000
+ BlkioWeightDevice:
+ description: |
+ Block IO weight (relative device weight) in the form:
+
+ ```
+ [{"Path": "device_path", "Weight": weight}]
+ ```
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Path:
+ type: "string"
+ Weight:
+ type: "integer"
+ minimum: 0
+ BlkioDeviceReadBps:
+ description: |
+ Limit read rate (bytes per second) from a device, in the form:
+
+ ```
+ [{"Path": "device_path", "Rate": rate}]
+ ```
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ BlkioDeviceWriteBps:
+ description: |
+ Limit write rate (bytes per second) to a device, in the form:
+
+ ```
+ [{"Path": "device_path", "Rate": rate}]
+ ```
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ BlkioDeviceReadIOps:
+ description: |
+ Limit read rate (IO per second) from a device, in the form:
+
+ ```
+ [{"Path": "device_path", "Rate": rate}]
+ ```
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ BlkioDeviceWriteIOps:
+ description: |
+ Limit write rate (IO per second) to a device, in the form:
+
+ ```
+ [{"Path": "device_path", "Rate": rate}]
+ ```
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ CpuPeriod:
+ description: "The length of a CPU period in microseconds."
+ type: "integer"
+ format: "int64"
+ CpuQuota:
+ description: |
+ Microseconds of CPU time that the container can get in a CPU period.
+ type: "integer"
+ format: "int64"
+ CpuRealtimePeriod:
+ description: |
+ The length of a CPU real-time period in microseconds. Set to 0 to
+ allocate no time to real-time tasks.
+ type: "integer"
+ format: "int64"
+ CpuRealtimeRuntime:
+ description: |
+ The length of a CPU real-time runtime in microseconds. Set to 0 to
+ allocate no time to real-time tasks.
+ type: "integer"
+ format: "int64"
+ CpusetCpus:
+ description: |
+ CPUs in which to allow execution (e.g., `0-3`, `0,1`).
+ type: "string"
+ example: "0-3"
+ CpusetMems:
+ description: |
+ Memory nodes (MEMs) in which to allow execution (e.g., `0-3`, `0,1`). Only
+ effective on NUMA systems.
+ type: "string"
+ Devices:
+ description: "A list of devices to add to the container."
+ type: "array"
+ items:
+ $ref: "#/definitions/DeviceMapping"
+ DeviceCgroupRules:
+ description: "a list of cgroup rules to apply to the container"
+ type: "array"
+ items:
+ type: "string"
+ example: "c 13:* rwm"
+ DeviceRequests:
+ description: |
+ A list of requests for devices to be sent to device drivers.
+ type: "array"
+ items:
+ $ref: "#/definitions/DeviceRequest"
+ KernelMemoryTCP:
+ description: |
+ Hard limit for kernel TCP buffer memory (in bytes). Depending on the
+ OCI runtime in use, this option may be ignored. It is no longer supported
+ by the default (runc) runtime.
+
+ This field is omitted when empty.
+
+ **Deprecated**: This field is deprecated as kernel 6.12 has deprecated the `memory.kmem.tcp.limit_in_bytes` field
+ for cgroups v1. This field will be removed in a future release.
+ type: "integer"
+ format: "int64"
+ MemoryReservation:
+ description: "Memory soft limit in bytes."
+ type: "integer"
+ format: "int64"
+ MemorySwap:
+ description: |
+ Total memory limit (memory + swap). Set as `-1` to enable unlimited
+ swap.
+ type: "integer"
+ format: "int64"
+ MemorySwappiness:
+ description: |
+ Tune a container's memory swappiness behavior. Accepts an integer
+ between 0 and 100.
+ type: "integer"
+ format: "int64"
+ minimum: 0
+ maximum: 100
+ NanoCpus:
+ description: "CPU quota in units of 10<sup>-9</sup> CPUs."
+ type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. + type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. + type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. + type: "object" + properties: + Test: + description: | + The test to perform. 
Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
+ type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. + + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + example: "" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + description: |- + Name of the logging driver used for the container or "none" + if logging is disabled. + type: "string" + enum: + - "local" + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + description: |- + Driver-specific configuration options for the logging driver. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "5" + "max-size": "10m" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:<name|id>`. 
Any + other value is taken as a custom network's name to which this + container should connect to. + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `<container name>[:<ro|rw>]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + Annotations: + type: "object" + description: | + Arbitrary non-identifying metadata attached to container and + provided to the runtime when the container is started. + additionalProperties: + type: "string" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:<name|id>"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. 
+ items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:<name|id>"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: |- + Gives the container full access to the host. + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. + + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. + ShmSize: + type: "integer" + format: "int64" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + x-nullable: true + description: |- + A list of kernel parameters (sysctls) to set in the container. + + This field is omitted if not set. + additionalProperties: + type: "string" + example: + "net.ipv4.ip_forward": "1" + Runtime: + type: "string" + x-nullable: true + description: |- + Runtime to use with this container. + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. 
+ type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`<user-name|UID>[<:group-name|GID>]`). + type: "string" + example: "123:456" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + MacAddress: + description: | + MAC address of the container. + + Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. + type: "string" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. 
+ type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + ImageConfig: + description: | + Configuration of the image. These fields are used as defaults + when starting a container from the image. + type: "object" + properties: + User: + description: "The user that commands are run as inside the container." + type: "string" + example: "web:web" + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: + "/app/data": {} + "/app/config": {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. 
+ type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: | + Name of the default bridge interface when dockerd's --bridge flag is set. + + Deprecated: This field is only set when the daemon is started with the --bridge flag specified. + type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + + Deprecated: This field is never set and will be removed in a future release. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: | + IPv6 unicast address using the link-local prefix. + + Deprecated: This field is never set and will be removed in a future release. + type: "string" + example: "" + LinkLocalIPv6PrefixLen: + description: | + Prefix length of the IPv6 unicast address. + + Deprecated: This field is never set and will be removed in a future release. + type: "integer" + example: "" + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey is the full path of the netns handle + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + SecondaryIPAddresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + SecondaryIPv6Addresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. 
This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + + <p><br /></p> + + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "02:42:ac:11:00:04" + Networks: + description: | + Information about all networks that the container is connected to. 
+ type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `<port>/<protocol>`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + x-nullable: true + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" + + DriverData: + description: | + Information about the storage driver used to store the container's and + image's filesystem. + type: "object" + required: [Name, Data] + properties: + Name: + description: "Name of the storage driver." + type: "string" + x-nullable: false + example: "overlay2" + Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific, and depends on the storage-driver + in use, and should be used for informational purposes only. + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } + + FilesystemChange: + description: | + Change in the container's filesystem. + type: "object" + required: [Path, Kind] + properties: + Path: + description: | + Path to file or directory that has changed. + type: "string" + x-nullable: false + Kind: + $ref: "#/definitions/ChangeType" + + ChangeType: + description: | + Kind of change + + Can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + + ImageInspect: + description: | + Information about an image in the local image cache. + type: "object" + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. 
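+  # Illustration (not part of the schema): the two identifiers name different
+  # objects. `Id` is the digest of the image *configuration*, while `RepoDigests`
+  # holds digests of registry *manifests* that point at that configuration.
+  # Compare the `example` values below: one configuration digest
+  # (sha256:ec3f09...) may be referenced by several manifest digests
+  # (example@sha256:afcc7f..., internal.registry.example.com:5000/example@sha256:b69959...).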
+ type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + Manifests: + description: | + Manifests is a list of image manifests available in this image. It + provides a more detailed view of the platform-specific image manifests or + other image-attached data like build attestations. + + Only available if the daemon provides a multi-platform image store + and the `manifests` option is set in the inspect request. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: true + items: + $ref: "#/definitions/ImageManifestSummary" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Parent: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + + > **Deprecated**: This field is only set when using the deprecated + > legacy builder. It is included in API responses for informational + > purposes, but should not be depended on as it will be omitted + > once the legacy builder is removed. + type: "string" + x-nullable: false + example: "" + Comment: + description: | + Optional message that was set when committing or importing the image. + type: "string" + x-nullable: false + example: "" + Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if present in the image, + and omitted otherwise. + type: "string" + format: "dateTime" + x-nullable: true + example: "2022-02-04T21:20:12.497794809Z" + DockerVersion: + description: | + The version of Docker that was used to build the image. + + Depending on how the image was created, this field may be empty. + + > **Deprecated**: This field is only set when using the deprecated + > legacy builder. 
It is included in API responses for informational + > purposes, but should not be depended on as it will be omitted + > once the legacy builder is removed. + type: "string" + x-nullable: false + example: "27.0.1" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: false + example: "" + Config: + $ref: "#/definitions/ImageConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. + type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + GraphDriver: + $ref: "#/definitions/DriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + + ImageSummary: + type: "object" + x-go-name: "Summary" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. 
+ type: "array" + x-nullable: false + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Created: + description: | + Date and time at which the image was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + x-nullable: false + example: "1644009612" + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 172064416 + SharedSize: + description: | + Total size of image layers that are shared between this image and other + images. + + This size is not calculated by default. `-1` indicates that the value + has not been set / calculated. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + Labels: + description: "User-defined key/value metadata." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Containers: + description: | + Number of containers using this image. Includes both stopped and running + containers. + + `-1` indicates that the value has not been set / calculated. + x-nullable: false + type: "integer" + example: 2 + Manifests: + description: | + Manifests is a list of manifests available in this image. + It provides a more detailed view of the platform-specific image manifests + or other image-attached data like build attestations. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: false + x-omitempty: true + items: + $ref: "#/definitions/ImageManifestSummary" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. 
+ type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + example: "tardis" + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + example: "custom" + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + example: "/var/lib/docker/volumes/tardis" + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. 
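+  # Sketch of the CLI mapping (an assumption, not defined by this schema):
+  # each key/value pair corresponds to one `--opt` flag, e.g.
+  #
+  #   docker volume create --driver local \
+  #     --opt type=tmpfs --opt device=tmpfs --opt o=size=100m,uid=1000 tardis
+  #
+  # which yields the DriverOpts example shown below.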
+ type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + example: [] + + Network: + type: "object" + properties: + Name: + description: | + Name of the network. + type: "string" + example: "my_network" + Id: + description: | + ID that uniquely identifies a network on a single machine. + type: "string" + example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: + description: | + Date and time at which the network was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-10-19T04:33:30.360899459Z" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level) + type: "string" + example: "local" + Driver: + description: | + The name of the driver used to create the network (e.g. `bridge`, + `overlay`). + type: "string" + example: "overlay" + EnableIPv4: + description: | + Whether the network was created with IPv4 enabled. + type: "boolean" + example: true + EnableIPv6: + description: | + Whether the network was created with IPv6 enabled. + type: "boolean" + example: false + IPAM: + $ref: "#/definitions/IPAM" + Internal: + description: | + Whether the network is created to only allow internal networking + connectivity. + type: "boolean" + default: false + example: false + Attachable: + description: | + Whether a global / swarm scope network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + default: false + example: false + Ingress: + description: | + Whether the network is providing the routing-mesh for the swarm cluster. + type: "boolean" + default: false + example: false + ConfigFrom: + $ref: "#/definitions/ConfigReference" + ConfigOnly: + description: | + Whether the network is a config-only network. Config-only networks are + placeholder networks for network configurations to be used by other + networks. Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + Containers: + description: | + Contains endpoints attached to the network. + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + example: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + description: | + Network-specific options uses when creating the network. 
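+  # Sketch (the CLI mapping is an assumption): these are the `-o` / `--opt`
+  # values given to `docker network create`; the example below shows the
+  # options reported for the default `docker0` bridge network.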
+ type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Peers: + description: | + List of peer nodes for an overlay network. This field is only present + for overlay networks, and omitted for other network types. + type: "array" + items: + $ref: "#/definitions/PeerInfo" + x-nullable: true + # TODO: Add Services (only present when "verbose" is set). + + ConfigReference: + description: | + The config-only network source to provide the configuration for + this network. + type: "object" + properties: + Network: + description: | + The name of the config-only network that provides the network's + configuration. The specified network must be an existing config-only + network. Only network names are allowed, not network IDs. + type: "string" + example: "config_only_network_01" + + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + example: "default" + Config: + description: | + List of IPAM configuration options, specified as a map: + + ``` + {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} + ``` + type: "array" + items: + $ref: "#/definitions/IPAMConfig" + Options: + description: "Driver-specific options, specified as a map." + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + example: "172.20.0.0/16" + IPRange: + type: "string" + example: "172.20.10.0/24" + Gateway: + type: "string" + example: "172.20.10.11" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + example: "container_1" + EndpointID: + type: "string" + example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: + type: "string" + example: "02:42:ac:13:00:02" + IPv4Address: + type: "string" + example: "172.19.0.2/16" + IPv6Address: + type: "string" + example: "" + + PeerInfo: + description: | + PeerInfo represents one peer of an overlay network. + type: "object" + properties: + Name: + description: + ID of the peer-node in the Swarm cluster. + type: "string" + example: "6869d7c1732b" + IP: + description: + IP-address of the peer-node in the Swarm cluster. + type: "string" + example: "10.133.77.91" + + NetworkCreateResponse: + description: "OK response to NetworkCreate operation" + type: "object" + title: "NetworkCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warning] + properties: + Id: + description: "The ID of the created network." 
+ type: "string" + x-nullable: false + example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" + Warning: + description: "Warnings encountered when creating the container" + type: "string" + x-nullable: false + example: "" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + description: | + BuildCache contains information about a build cache record. + properties: + ID: + type: "string" + description: | + Unique ID of the build cache record. + example: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] + Type: + type: "string" + description: | + Cache record type. + example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. 
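+  # Illustration (the transport is an assumption, not defined in this
+  # definition): records of this shape are streamed as newline-separated JSON
+  # while an image is pulled or imported, e.g.
+  #
+  #   {"status": "Pulling from library/busybox", "id": "latest"}
+  #   {"status": "Downloading", "progressDetail": {"current": 1024, "total": 764480}}
+  #
+  # Clients should keep reading until the stream closes and treat any record
+  # carrying `errorDetail` as a failure.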
+ errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + + DeviceInfo: + type: "object" + description: | + DeviceInfo represents a device that can be used by a container. + properties: + Source: + type: "string" + example: "cdi" + description: | + The origin device driver. + ID: + type: "string" + example: "vendor.com/gpu=0" + description: | + The unique identifier for the device within its source driver. + For CDI devices, this would be an FQDN like "vendor.com/gpu=0". + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IDResponse: + description: "Response to an API call that returns just an Id" + type: "object" + x-go-name: "IDResponse" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + MacAddress: + description: | + MAC address for the endpoint on this network. The network driver might ignore this parameter. + type: "string" + example: "02:42:ac:11:00:04" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + GwPriority: + description: | + This property determines which endpoint will provide the default + gateway for a container. The endpoint with the highest priority will + be used. If multiple endpoints have the same priority, endpoints are + lexicographically sorted based on their network name, and the one + that sorts first is picked. 
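+  # Worked illustration of the rule above (network names are made up):
+  #   netA GwPriority=10, netB GwPriority=5   -> netA provides the default gateway
+  #   netA GwPriority=0,  netB GwPriority=0   -> "netA" sorts first alphabetically,
+  #                                              so netA provides the default gateway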
+ type: "integer" + format: "int64" + example: + - 10 + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + DNSNames: + description: | + List of all DNS names an endpoint has on a specific network. This + list is based on the container name, network aliases, container short + ID, and hostname. + + These DNS names are non-fully qualified but can contain several dots. + You can get fully qualified DNS names by appending `.<network-name>`. + For instance, if container name is `my.ctr` and the network is named + `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be + `my.ctr.testnet`. + type: array + items: + type: string + example: ["foobar", "server_x", "server_y", "my.ctr"] + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." 
+ Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. + type: "object" + x-go-name: "PluginPrivilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: |- + Docker Version used to create the plugin. + + Depending on how the plugin was created, this field may be empty or omitted. + + Deprecated: this field is no longer set, and will be removed in the next API version. 
+ type: "string" + x-nullable: false + x-omitempty: true + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." + enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. 
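+  # Worked illustration of the rule above: two clients inspect the same object
+  # and both read Version.Index = 373531. Both submit updates using 373531 as
+  # the base version; the first update is applied and bumps the index, while
+  # the second is rejected because its base version is now stale, so neither
+  # change silently overwrites the other.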
+ type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." + type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. 
+ type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. + type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. 
+ type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. + type: "string" + SigningCAKey: + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. + type: "string" + ForceRotate: + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey` + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: | + If set, generate a key and use it to lock data stored on the + managers. + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. 
+ type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selected log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + + <p><br /></p> + + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." 
+ type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + + <p><br /></p> + + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + + <p><br /></p> + + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + + <p><br /></p> + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + + <p><br /></p> + + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. 
+ SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. The + secret in the reference will be identified by its ID. 
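+  # Illustration (IDs and names are made up): a typical secret reference in a
+  # ContainerSpec looks like
+  #
+  #   Secrets:
+  #     - File: {Name: "www.example.org.key", UID: "33", GID: "33", Mode: 384}
+  #       SecretID: "fpjqlhnwb19zds35k8wn80lq9"
+  #       SecretName: "example_org_domain_key"
+  #
+  # Mode is a numeric FileMode, so 384 here is octal 0600.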
+ type: "string" + OomScoreAdj: + type: "integer" + format: "int64" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 0 + Configs: + description: | + Configs contains references to zero or more configs that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + + <p><br /><p> + + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the + container but is used by the task + + <p><br /><p> + + > **Note**: `Configs.File` and `Configs.Runtime` are mutually + > exclusive + type: "object" + ConfigID: + description: | + ConfigID represents the ID of the specific config that we're + referencing. + type: "string" + ConfigName: + description: | + ConfigName is the name of the config that this references, + but this is just provided for lookup/display purposes. The + config in the reference will be identified by its ID. + type: "string" + Isolation: + type: "string" + description: | + Isolation technology of the containers running the service. + (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + Sysctls: + description: | + Set kernel namedspaced parameters (sysctls) in the container. + The Sysctls option on services accepts the same sysctls as the + are supported on containers. Note that while the same sysctls are + supported, no guarantees or checks are made about their + suitability for a clustered environment, and it's up to the user + to determine whether a given sysctl will work properly in a + Service. + type: "object" + additionalProperties: + type: "string" + # This option is not used by Windows containers + CapabilityAdd: + type: "array" + description: | + A list of kernel capabilities to add to the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + - "CAP_SYS_ADMIN" + - "CAP_SYS_CHROOT" + - "CAP_SYSLOG" + CapabilityDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + NetworkAttachmentSpec: + description: | + Read-only spec type for non-swarm containers attached to swarm overlay + networks. + + <p><br /></p> + + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. 
+ > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+ > field is set to `attachment`.
+ type: "object"
+ properties:
+ ContainerID:
+ description: "ID of the container represented by this task"
+ type: "string"
+ Resources:
+ description: |
+ Resource requirements which apply to each individual container created
+ as part of the service.
+ type: "object"
+ properties:
+ Limits:
+ description: "Define resource limits."
+ $ref: "#/definitions/Limit"
+ Reservations:
+ description: "Define resource reservations."
+ $ref: "#/definitions/ResourceObject"
+ RestartPolicy:
+ description: |
+ Specification for the restart policy which applies to containers
+ created as part of this service.
+ type: "object"
+ properties:
+ Condition:
+ description: "Condition for restart."
+ type: "string"
+ enum:
+ - "none"
+ - "on-failure"
+ - "any"
+ Delay:
+ description: "Delay between restart attempts."
+ type: "integer"
+ format: "int64"
+ MaxAttempts:
+ description: |
+ Maximum attempts to restart a given container before giving up
+ (default value is 0, which is ignored).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Window:
+ description: |
+ Window is the time window used to evaluate the restart policy
+ (default value is 0, which is unbounded).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Placement:
+ type: "object"
+ properties:
+ Constraints:
+ description: |
+ An array of constraint expressions to limit the set of nodes where
+ a task can be scheduled. Constraint expressions can either use a
+ _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
+ nodes that satisfy every expression (AND match). Constraints can
+ match node or Docker Engine labels as follows:
+
+ node attribute | matches | example
+ ---------------------|--------------------------------|-----------------------------------------------
+ `node.id` | Node ID | `node.id==2ivku8v2gvtg4`
+ `node.hostname` | Node hostname | `node.hostname!=node-2`
+ `node.role` | Node role (`manager`/`worker`) | `node.role==manager`
+ `node.platform.os` | Node operating system | `node.platform.os==windows`
+ `node.platform.arch` | Node architecture | `node.platform.arch==x86_64`
+ `node.labels` | User-defined node labels | `node.labels.security==high`
+ `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04`
+
+ `engine.labels` apply to Docker Engine labels like operating system,
+ drivers, etc. Swarm administrators add `node.labels` for operational
+ purposes by using the [`node update endpoint`](#operation/NodeUpdate).
+
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "node.hostname!=node3.corp.example.com"
+ - "node.role!=manager"
+ - "node.labels.type==production"
+ - "node.platform.os==linux"
+ - "node.platform.arch==x86_64"
+ Preferences:
+ description: |
+ Preferences provide a way to make the scheduler aware of factors
+ such as topology. They are provided in order from highest to
+ lowest precedence.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Spread:
+ type: "object"
+ properties:
+ SpreadDescriptor:
+ description: |
+ label descriptor, such as `engine.labels.az`.
+ type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: | + Maximum number of replicas for per node (default value is 0, which + is unlimited) + type: "integer" + format: "int64" + default: 0 + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: | + A counter that triggers an update even if no relevant parameters have + been changed. + type: "integer" + format: "uint64" + Runtime: + description: | + Runtime is the type of runtime specified for the task executor. + type: "string" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + LogDriver: + description: | + Specifies the log driver to use for tasks created from this spec. If + not present, the default one for the swarm will be used, finally + falling back to the engine default if not specified. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + ContainerStatus: + type: "object" + description: "represents the status of a container." + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + $ref: "#/definitions/ContainerStatus" + PortStatus: + $ref: "#/definitions/PortStatus" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + $ref: "#/definitions/TaskStatus" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service. 
+ $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. 
+ type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during an update before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling out an updated task. Either + the old task is shut down before the new task is started, or the + new task is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be rolled back in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: | + Amount of time between rollback iterations, in nanoseconds. + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an rolled back task fails to run, or stops + running during the rollback. + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: | + Amount of time to monitor each rolled back task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during a rollback before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling back a task. Either the old + task is shut down before the new task is started, or the new task + is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: | + Specifies which networks the service should attach to. + + Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + - "sctp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + PublishMode: + description: | + The mode in which port is published. + + <p><br /></p> + + - "ingress" makes the target port accessible on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publish the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: | + The mode of resolution to use for internal load balancing between tasks. + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. 
+ type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + ServiceStatus: + description: | + The status of the service's tasks. Provided only when requested as + part of a ServiceList operation. + type: "object" + properties: + RunningTasks: + description: | + The number of tasks for the service currently in the Running state. + type: "integer" + format: "uint64" + example: 7 + DesiredTasks: + description: | + The number of tasks for the service desired to be running. + For replicated services, this is the replica count from the + service spec. For global services, this is computed by taking + count of all tasks for the service with a Desired State other + than Shutdown. + type: "integer" + format: "uint64" + example: 10 + CompletedTasks: + description: | + The number of tasks for a job that are in the Completed state. + This field must be cross-referenced with the service type, as the + value of 0 may mean the service is not in a job mode, or it may + mean the job-mode service has no tasks yet Completed. + type: "integer" + format: "uint64" + JobStatus: + description: | + The status of the service when it is in one of ReplicatedJob or + GlobalJob modes. Absent on Replicated and Global mode services. The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job. Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. 
+ type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + x-go-name: "DeleteResponse" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceCreateResponse: + type: "object" + description: | + contains the information returned to a client on the + creation of a new service. + properties: + ID: + description: "The ID of the created service." + type: "string" + x-nullable: false + example: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warnings: + description: | + Optional warning message. + + FIXME(thaJeztah): this should have "omitempty" in the generated type. + type: "array" + x-nullable: true + items: + type: "string" + example: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warnings: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerInspectResponse: + type: "object" + title: "ContainerInspectResponse" + x-go-name: "InspectResponse" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Created: + description: |- + Date and time at which the container was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2025-02-17T17:43:39.64001363Z" + Path: + description: |- + The path to the command being run + type: "string" + example: "/bin/sh" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + example: + - "-c" + - "exit 9" + State: + $ref: "#/definitions/ContainerState" + Image: + description: |- + The ID (digest) of the image that this container was created from. 
+ type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ResolvConfPath: + description: |- + Location of the `/etc/resolv.conf` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" + HostnamePath: + description: |- + Location of the `/etc/hostname` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" + HostsPath: + description: |- + Location of the `/etc/hosts` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" + LogPath: + description: |- + Location of the file used to buffer the container's logs. Depending on + the logging-driver used for the container, this field may be omitted. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + x-nullable: true + example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" + Name: + description: |- + The name associated with this container. + + For historic reasons, the name may be prefixed with a forward-slash (`/`). + type: "string" + example: "/funny_chatelet" + RestartCount: + description: |- + Number of times the container was restarted since it was created, + or since daemon was started. + type: "integer" + example: 0 + Driver: + description: |- + The storage-driver used for the container's filesystem (graph-driver + or snapshotter). + type: "string" + example: "overlayfs" + Platform: + description: |- + The platform (operating system) for which the container was created. + + This field was introduced for the experimental "LCOW" (Linux Containers + On Windows) features, which has been removed. In most cases, this field + is equal to the host's operating system (`linux` or `windows`). + type: "string" + example: "linux" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + description: |- + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + MountLabel: + description: |- + SELinux mount label set for the container. + type: "string" + example: "" + ProcessLabel: + description: |- + SELinux process label set for the container. + type: "string" + example: "" + AppArmorProfile: + description: |- + The AppArmor profile set for the container. + type: "string" + example: "" + ExecIDs: + description: |- + IDs of exec instances that are running in the container. 
+ type: "array" + items: + type: "string" + x-nullable: true + example: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/DriverData" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Mounts: + description: |- + List of mounts used by the container. + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + + ContainerSummary: + type: "object" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Names: + description: |- + The names associated with this container. Most containers have a single + name, but when using legacy "links", the container can have multiple + names. + + For historic reasons, names are prefixed with a forward-slash (`/`). + type: "array" + items: + type: "string" + example: + - "/funny_chatelet" + Image: + description: |- + The name or ID of the image used to create the container. + + This field shows the image reference as was specified when creating the container, + which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest` + or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`), + short form (e.g., `ubuntu:latest`)), or the ID(-prefix) of the image (e.g., `72297848456d`). + + The content of this field can be updated at runtime if the image used to + create the container is untagged, in which case the field is updated to + contain the the image ID (digest) it was resolved to in its canonical, + non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`). + type: "string" + example: "docker.io/library/ubuntu:latest" + ImageID: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + x-nullable: true + description: | + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + + This field is not populated in the `GET /system/df` endpoint. + Command: + description: "Command to run when starting the container" + type: "string" + example: "/bin/bash" + Created: + description: |- + Date and time at which the container was created as a Unix timestamp + (number of seconds since EPOCH). 
+ type: "integer" + format: "int64" + example: "1739811096" + Ports: + description: |- + Port-mappings for the container. + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + State: + description: | + The state of this container. + type: "string" + enum: + - "created" + - "running" + - "paused" + - "restarting" + - "exited" + - "removing" + - "dead" + example: "running" + Status: + description: |- + Additional human-readable status of this container (e.g. `Exit 0`) + type: "string" + example: "Up 4 days" + HostConfig: + type: "object" + description: |- + Summary of host-specific runtime information of the container. This + is a reduced set of information in the container's "HostConfig" as + available in the container "inspect" response. + properties: + NetworkMode: + description: |- + Networking mode (`host`, `none`, `container:<id>`) or name of the + primary network the container is using. + + This field is primarily for backward compatibility. The container + can be connected to multiple networks for which information can be + found in the `NetworkSettings.Networks` field, which enumerates + settings per network. + type: "string" + example: "mynetwork" + Annotations: + description: |- + Arbitrary key-value metadata attached to the container. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" + NetworkSettings: + description: |- + Summary of the container's network settings + type: "object" + properties: + Networks: + type: "object" + description: |- + Summary of network-settings for each network the container is + attached to. + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + description: |- + List of mounts used by the container. + items: + $ref: "#/definitions/MountPoint" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. 
+ + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether a process within this container has been killed because it ran + out of memory since the container was last started. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerUpdateResponse: + type: "object" + title: "ContainerUpdateResponse" + x-go-name: "UpdateResponse" + description: |- + Response for a successful container-update. + properties: + Warnings: + type: "array" + description: |- + Warnings encountered when updating the container. + items: + type: "string" + example: ["Published ports are discarded when using host network mode"] + + ContainerStatsResponse: + description: | + Statistics sample for a container. + type: "object" + x-go-name: "StatsResponse" + title: "ContainerStatsResponse" + properties: + name: + description: "Name of the container" + type: "string" + x-nullable: true + example: "boring_wozniak" + id: + description: "ID of the container" + type: "string" + x-nullable: true + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + read: + description: | + Date and time at which this sample was collected. + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:22.165243637Z" + preread: + description: | + Date and time at which this first sample was collected. This field + is not propagated if the "one-shot" option is set. If the "one-shot" + option is set, this field may be omitted, empty, or set to a default + date (`0001-01-01T00:00:00Z`). + + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:21.160452595Z" + pids_stats: + $ref: "#/definitions/ContainerPidsStats" + blkio_stats: + $ref: "#/definitions/ContainerBlkioStats" + num_procs: + description: | + The number of processors on the system. + + This field is Windows-specific and always zero for Linux containers. 
+ type: "integer" + format: "uint32" + example: 16 + storage_stats: + $ref: "#/definitions/ContainerStorageStats" + cpu_stats: + $ref: "#/definitions/ContainerCPUStats" + precpu_stats: + $ref: "#/definitions/ContainerCPUStats" + memory_stats: + $ref: "#/definitions/ContainerMemoryStats" + networks: + description: | + Network statistics for the container per interface. + + This field is omitted if the container has no networking enabled. + x-nullable: true + additionalProperties: + $ref: "#/definitions/ContainerNetworkStats" + example: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + + ContainerBlkioStats: + description: | + BlkioStats stores all IO service stats for data read and write. + + This type is Linux-specific and holds many fields that are specific to cgroups v1. + On a cgroup v2 host, all fields other than `io_service_bytes_recursive` + are omitted or `null`. + + This type is only populated on Linux and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStats" + x-nullable: true + properties: + io_service_bytes_recursive: + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_serviced_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_queue_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_service_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_wait_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_merged_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + sectors_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. 
+ x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + example: + io_service_bytes_recursive: [ + {"major": 254, "minor": 0, "op": "read", "value": 7593984}, + {"major": 254, "minor": 0, "op": "write", "value": 100} + ] + io_serviced_recursive: null + io_queue_recursive: null + io_service_time_recursive: null + io_wait_time_recursive: null + io_merged_recursive: null + io_time_recursive: null + sectors_recursive: null + + ContainerBlkioStatEntry: + description: | + Blkio stats entry. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStatEntry" + x-nullable: true + properties: + major: + type: "integer" + format: "uint64" + example: 254 + minor: + type: "integer" + format: "uint64" + example: 0 + op: + type: "string" + example: "read" + value: + type: "integer" + format: "uint64" + example: 7593984 + + ContainerCPUStats: + description: | + CPU related info of the container + type: "object" + x-go-name: "CPUStats" + x-nullable: true + properties: + cpu_usage: + $ref: "#/definitions/ContainerCPUUsage" + system_cpu_usage: + description: | + System Usage. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + online_cpus: + description: | + Number of online CPUs. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint32" + x-nullable: true + example: 5 + throttling_data: + $ref: "#/definitions/ContainerThrottlingData" + + ContainerCPUUsage: + description: | + All CPU stats aggregated since container inception. + type: "object" + x-go-name: "CPUUsage" + x-nullable: true + properties: + total_usage: + description: | + Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). + type: "integer" + format: "uint64" + example: 29912000 + percpu_usage: + description: | + Total CPU time (in nanoseconds) consumed per core (Linux). + + This field is Linux-specific when using cgroups v1. It is omitted + when using cgroups v2 and Windows containers. + type: "array" + x-nullable: true + items: + type: "integer" + format: "uint64" + example: 29912000 + + usage_in_kernelmode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 21994000 + usage_in_usermode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 7918000 + + ContainerPidsStats: + description: | + PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "PidsStats" + x-nullable: true + properties: + current: + description: | + Current is the number of PIDs in the cgroup. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + limit: + description: | + Limit is the hard limit on the number of pids in the cgroup. + A "Limit" of 0 means that there is no limit. 
+ type: "integer" + format: "uint64" + x-nullable: true + example: "18446744073709551615" + + ContainerThrottlingData: + description: | + CPU throttling stats of the container. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "ThrottlingData" + x-nullable: true + properties: + periods: + description: | + Number of periods with throttling active. + type: "integer" + format: "uint64" + example: 0 + throttled_periods: + description: | + Number of periods when the container hit its throttling limit. + type: "integer" + format: "uint64" + example: 0 + throttled_time: + description: | + Aggregated time (in nanoseconds) the container was throttled for. + type: "integer" + format: "uint64" + example: 0 + + ContainerMemoryStats: + description: | + Aggregates all memory stats since container inception on Linux. + Windows returns stats for commit and private working set only. + type: "object" + x-go-name: "MemoryStats" + properties: + usage: + description: | + Current `res_counter` usage for memory. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + max_usage: + description: | + Maximum usage ever recorded. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + stats: + description: | + All the stats exported via memory.stat. + + The fields in this object differ between cgroups v1 and v2. + On cgroups v1, fields such as `cache`, `rss`, `mapped_file` are available. + On cgroups v2, fields such as `file`, `anon`, `inactive_file` are available. + + This field is Linux-specific and omitted for Windows containers. + type: "object" + additionalProperties: + type: "integer" + format: "uint64" + x-nullable: true + example: + { + "active_anon": 1572864, + "active_file": 5115904, + "anon": 1572864, + "anon_thp": 0, + "file": 7626752, + "file_dirty": 0, + "file_mapped": 2723840, + "file_writeback": 0, + "inactive_anon": 0, + "inactive_file": 2510848, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 2042, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 45, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 1180928, + "slab_reclaimable": 725576, + "slab_unreclaimable": 455352, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 1, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + } + failcnt: + description: | + Number of times memory usage hits limits. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + limit: + description: | + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 8217579520 + commitbytes: + description: | + Committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + commitpeakbytes: + description: | + Peak committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + privateworkingset: + description: | + Private working set. 
+ + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + + ContainerNetworkStats: + description: | + Aggregates the network stats of one container + type: "object" + x-go-name: "NetworkStats" + x-nullable: true + properties: + rx_bytes: + description: | + Bytes received. Windows and Linux. + type: "integer" + format: "uint64" + example: 5338 + rx_packets: + description: | + Packets received. Windows and Linux. + type: "integer" + format: "uint64" + example: 36 + rx_errors: + description: | + Received errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + rx_dropped: + description: | + Incoming packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + tx_bytes: + description: | + Bytes sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 1200 + tx_packets: + description: | + Packets sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 12 + tx_errors: + description: | + Sent errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + tx_dropped: + description: | + Outgoing packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + endpoint_id: + description: | + Endpoint ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + instance_id: + description: | + Instance ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + + ContainerStorageStats: + description: | + StorageStats is the disk I/O stats for read/write on Windows. + + This type is Windows-specific and omitted for Linux containers. + type: "object" + x-go-name: "StorageStats" + x-nullable: true + properties: + read_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + read_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + + ContainerTopResponse: + type: "object" + x-go-name: "TopResponse" + title: "ContainerTopResponse" + description: |- + Container "top" response. + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + example: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + description: |- + Each process running in the container, where each process + is an array of values corresponding to the titles. 
+ type: "array" + items: + type: "array" + items: + type: "string" + example: + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "27.0.1" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "27.0.1" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.47" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.24" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.22.7" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "6.8.0-31-generic" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. 
+ + <p><br /></p> + + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + + <p><br /></p> + + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemoryTCP: + description: | + Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. + + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + + **Deprecated**: This field is deprecated as kernel 6.12 has deprecated kernel memory TCP accounting. + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." 
+ type: "boolean" + example: true + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. + type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information obtained from `uname`. On Windows this + information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd> + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "6.8.0-31-generic" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 24.04 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Ubuntu 24.04 LTS" + OSVersion: + description: | + Version of the host's operating system + + <p><br /></p> + + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "24.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. + + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. 
+ type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + + <p><br /></p> + + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + type: "string" + example: "27.0.1" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. 
+ + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shutdown + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + - "" + InitBinary: + description: | + Name and, optional, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, user-namespaces (userns), rootless and + no-new-privileges. + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + - "name=rootless" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included. + type: "string" + example: "Community Engine" + DefaultAddressPools: + description: | + List of custom default address pools for local networks, which can be + specified in the daemon.json file or dockerd option. + + Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 + 10.10.[0-255].0/24 address pools. + type: "array" + items: + type: "object" + properties: + Base: + description: "The network address in CIDR format" + type: "string" + example: "10.10.0.0/16" + Size: + description: "The network pool size" + type: "integer" + example: "24" + FirewallBackend: + $ref: "#/definitions/FirewallInfo" + DiscoveredDevices: + description: | + List of devices discovered by device drivers. + + Each device includes information about its source driver, kind, name, + and additional driver-specific attributes. + type: "array" + items: + $ref: "#/definitions/DeviceInfo" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + CDISpecDirs: + description: | + List of directories where (Container Device Interface) CDI + specifications are located. + + These specifications define vendor-specific modifications to an OCI + runtime specification for a container being created. + + An empty list indicates that CDI device injection is disabled. 
+
+          Note that using CDI device injection requires the daemon to have
+          experimental features enabled. For non-experimental daemons, an
+          empty list will always be returned.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "/etc/cdi"
+          - "/var/run/cdi"
+      Containerd:
+        $ref: "#/definitions/ContainerdInfo"
+
+  ContainerdInfo:
+    description: |
+      Information for connecting to the containerd instance that is used by the daemon.
+      This is included for debugging purposes only.
+    type: "object"
+    x-nullable: true
+    properties:
+      Address:
+        description: "The address of the containerd socket."
+        type: "string"
+        example: "/run/containerd/containerd.sock"
+      Namespaces:
+        description: |
+          The namespaces that the daemon uses for running containers and
+          plugins in containerd. These namespaces can be configured in the
+          daemon configuration, and are considered to be used exclusively
+          by the daemon. Tampering with the containerd instance may cause
+          unexpected behavior.
+
+          As these namespaces are considered to be exclusively accessed
+          by the daemon, it is not recommended to change these values,
+          or to change them to a value that is used by other systems,
+          such as cri-containerd.
+        type: "object"
+        properties:
+          Containers:
+            description: |
+              The default containerd namespace used for containers managed
+              by the daemon.
+
+              The default namespace for containers is "moby", but will be
+              suffixed with the `<uid>.<gid>` of the remapped `root` if
+              user-namespaces are enabled and the containerd image-store
+              is used.
+            type: "string"
+            default: "moby"
+            example: "moby"
+          Plugins:
+            description: |
+              The default containerd namespace used for plugins managed by
+              the daemon.
+
+              The default namespace for plugins is "plugins.moby", but will be
+              suffixed with the `<uid>.<gid>` of the remapped `root` if
+              user-namespaces are enabled and the containerd image-store
+              is used.
+            type: "string"
+            default: "plugins.moby"
+            example: "plugins.moby"
+
+  FirewallInfo:
+    description: |
+      Information about the daemon's firewalling configuration.
+
+      This field is currently only used on Linux, and omitted on other platforms.
+    type: "object"
+    x-nullable: true
+    properties:
+      Driver:
+        description: |
+          The name of the firewall backend driver.
+        type: "string"
+        example: "nftables"
+      Info:
+        description: |
+          Information about the firewall backend, provided as
+          "label" / "value" pairs.
+
+          <p><br /></p>
+
+          > **Note**: The information returned in this field, including the
+          > formatting of values and labels, should not be considered stable,
+          > and may change without notice.
+        type: "array"
+        items:
+          type: "array"
+          items:
+            type: "string"
+        example:
+          - ["ReloadedAt", "2025-01-01T00:00:00Z"]
+
+  # PluginsInfo is a temp struct holding Plugins name
+  # registered with docker daemon. It is used by Info struct
+  PluginsInfo:
+    description: |
+      Available plugins per type.
+
+      <p><br /></p>
+
+      > **Note**: Only unmanaged (V1) plugins are included in this list.
+      > V1 plugins are "lazily" loaded, and are not returned in this list
+      > if there is no resource using the plugin.
+    type: "object"
+    properties:
+      Volume:
+        description: "Names of available volume-drivers, and volume-driver plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["local"]
+      Network:
+        description: "Names of available network-drivers, and network-driver plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]
+      Authorization:
+        description: "Names of available authorization plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["img-authz-plugin", "hbm"]
+      Log:
+        description: "Names of available logging-drivers, and logging-driver plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"]
+
+
+  RegistryServiceConfig:
+    description: |
+      RegistryServiceConfig stores daemon registry services configuration.
+    type: "object"
+    x-nullable: true
+    properties:
+      InsecureRegistryCIDRs:
+        description: |
+          List of IP ranges of insecure registries, using the CIDR syntax
+          ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries
+          accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
+          from unknown CAs) communication.
+
+          By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as
+          insecure. All other registries are secure. Communicating with an
+          insecure registry is not possible if the daemon assumes that registry
+          is secure.
+
+          This configuration overrides this behavior, allowing insecure
+          communication with registries whose resolved IP address is within the
+          subnet described by the CIDR syntax.
+
+          Registries can also be marked insecure by hostname. Those registries
+          are listed under `IndexConfigs` and have their `Secure` field set to
+          `false`.
+
+          > **Warning**: Using this option can be useful when running a local
+          > registry, but introduces security vulnerabilities. This option
+          > should therefore ONLY be used for testing purposes. For increased
+          > security, users should add their CA to their system's list of trusted
+          > CAs instead of enabling this option.
+        type: "array"
+        items:
+          type: "string"
+        example: ["::1/128", "127.0.0.0/8"]
+      IndexConfigs:
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/IndexInfo"
+        example:
+          "127.0.0.1:5000":
+            "Name": "127.0.0.1:5000"
+            "Mirrors": []
+            "Secure": false
+            "Official": false
+          "[2001:db8:a0b:12f0::1]:80":
+            "Name": "[2001:db8:a0b:12f0::1]:80"
+            "Mirrors": []
+            "Secure": false
+            "Official": false
+          "docker.io":
+            Name: "docker.io"
+            Mirrors: ["https://hub-mirror.corp.example.com:5000/"]
+            Secure: true
+            Official: true
+          "registry.internal.corp.example.com:3000":
+            Name: "registry.internal.corp.example.com:3000"
+            Mirrors: []
+            Secure: false
+            Official: false
+      Mirrors:
+        description: |
+          List of registry URLs that act as a mirror for the official
+          (`docker.io`) registry.
+
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "https://hub-mirror.corp.example.com:5000/"
+          - "https://[2001:db8:a0b:12f0::1]/"
+
+  IndexInfo:
+    description:
+      IndexInfo contains information about a registry.
+    type: "object"
+    x-nullable: true
+    properties:
+      Name:
+        description: |
+          Name of the registry, such as "docker.io".
+        type: "string"
+        example: "docker.io"
+      Mirrors:
+        description: |
+          List of mirrors, expressed as URIs.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "https://hub-mirror.corp.example.com:5000/"
+          - "https://registry-2.docker.io/"
+          - "https://registry-3.docker.io/"
+      Secure:
+        description: |
+          Indicates if the registry is secure, meaning it is not part of the
+          list of insecure registries.
+
+          If `false`, the registry is insecure. Insecure registries accept
+          un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from
+          unknown CAs) communication.
+
+          > **Warning**: Insecure registries can be useful when running a local
+          > registry. However, because its use creates security vulnerabilities,
+          > it should ONLY be enabled for testing purposes. For increased
+          > security, users should add their CA to their system's list of
+          > trusted CAs instead of enabling this option.
+        type: "boolean"
+        example: true
+      Official:
+        description: |
+          Indicates whether this is an official registry (i.e., Docker Hub / docker.io)
+        type: "boolean"
+        example: true
+
+  Runtime:
+    description: |
+      Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec)
+      runtime.
+
+      The runtime is invoked by the daemon via the `containerd` daemon. OCI
+      runtimes act as an interface to the Linux kernel namespaces, cgroups,
+      and SELinux.
+    type: "object"
+    properties:
+      path:
+        description: |
+          Name and, optionally, path of the OCI executable binary.
+
+          If the path is omitted, the daemon searches the host's `$PATH` for the
+          binary and uses the first result.
+        type: "string"
+        example: "/usr/local/bin/my-oci-runtime"
+      runtimeArgs:
+        description: |
+          List of command-line arguments to pass to the runtime when invoked.
+        type: "array"
+        x-nullable: true
+        items:
+          type: "string"
+        example: ["--debug", "--systemd-cgroup=false"]
+      status:
+        description: |
+          Information specific to the runtime.
+
+          While this API specification does not define data provided by runtimes,
+          the following well-known properties may be provided by runtimes:
+
+          `org.opencontainers.runtime-spec.features`: features structure as defined
+          in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md),
+          in a JSON string representation.
+
+          <p><br /></p>
+
+          > **Note**: The information returned in this field, including the
+          > formatting of values and labels, should not be considered stable,
+          > and may change without notice.
+        type: "object"
+        x-nullable: true
+        additionalProperties:
+          type: "string"
+        example:
+          "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}"
+
+  Commit:
+    description: |
+      Commit holds the Git-commit (SHA1) that a binary was built from, as
+      reported in the version-string of external tools, such as `containerd`,
+      or `runC`.
+    type: "object"
+    properties:
+      ID:
+        description: "Actual commit ID of external tool."
+        type: "string"
+        example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
+
+  SwarmInfo:
+    description: |
+      Represents generic information about swarm.
+    type: "object"
+    properties:
+      NodeID:
+        description: "Unique identifier for this node in the swarm."
+        type: "string"
+        default: ""
+        example: "k67qz4598weg5unwwffg6z1m1"
+      NodeAddr:
+        description: |
+          IP address at which this node can be reached by other nodes in the
+          swarm.
+        type: "string"
+        default: ""
+        example: "10.0.0.46"
+      LocalNodeState:
+        $ref: "#/definitions/LocalNodeState"
+      ControlAvailable:
+        type: "boolean"
+        default: false
+        example: true
+      Error:
+        type: "string"
+        default: ""
+      RemoteManagers:
+        description: |
+          List of IDs and addresses of other managers in the swarm.
+        type: "array"
+        default: null
+        x-nullable: true
+        items:
+          $ref: "#/definitions/PeerNode"
+        example:
+          - NodeID: "71izy0goik036k48jg985xnds"
+            Addr: "10.0.0.158:2377"
+          - NodeID: "79y6h1o4gv8n120drcprv5nmc"
+            Addr: "10.0.0.159:2377"
+          - NodeID: "k67qz4598weg5unwwffg6z1m1"
+            Addr: "10.0.0.46:2377"
+      Nodes:
+        description: "Total number of nodes in the swarm."
+ type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. + type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.oci.image.manifest.v1+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 424 + urls: + description: |- + List of URLs from which this object MAY be downloaded. 
+ type: "array" + items: + type: "string" + format: "uri" + x-nullable: true + annotations: + description: |- + Arbitrary metadata relating to the targeted content. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "com.docker.official-images.bashbrew.arch": "amd64" + "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" + "org.opencontainers.image.base.name": "scratch" + "org.opencontainers.image.created": "2025-01-27T00:00:00Z" + "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" + "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" + "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" + "org.opencontainers.image.version": "24.04" + data: + type: string + x-nullable: true + description: |- + Data is an embedding of the targeted content. This is encoded as a base64 + string when marshalled to JSON (automatically, by encoding/json). If + present, Data can be used directly to avoid fetching the targeted content. + example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. + type: "string" + x-nullable: true + example: null + + OCIPlatform: + type: "object" + x-go-name: Platform + x-nullable: true + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. 
+ Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. 
+ - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + + ImageManifestSummary: + x-go-name: "ManifestSummary" + description: | + ImageManifestSummary represents a summary of an image manifest. 
+ type: "object" + required: ["ID", "Descriptor", "Available", "Size", "Kind"] + properties: + ID: + description: | + ID is the content-addressable ID of an image and is the same as the + digest of the image manifest. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Available: + description: Indicates whether all the child content (image config, layers) is fully available locally. + type: "boolean" + example: true + Size: + type: "object" + x-nullable: false + required: ["Content", "Total"] + properties: + Total: + type: "integer" + format: "int64" + example: 8213251 + description: | + Total is the total size (in bytes) of all the locally present + data (both distributable and non-distributable) that's related to + this manifest and its children. + This equal to the sum of [Content] size AND all the sizes in the + [Size] struct present in the Kind-specific data struct. + For example, for an image kind (Kind == "image") + this would include the size of the image content and unpacked + image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Content: + description: | + Content is the size (in bytes) of all the locally present + content in the content store (e.g. image config, layers) + referenced by this manifest and its children. + This only includes blobs in the content store. + type: "integer" + format: "int64" + example: 3987495 + Kind: + type: "string" + example: "image" + enum: + - "image" + - "attestation" + - "unknown" + description: | + The kind of the manifest. + + kind | description + -------------|----------------------------------------------------------- + image | Image manifest that can be used to start a container. + attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. + ImageData: + description: | + The image data for the image manifest. + This field is only populated when Kind is "image". + type: "object" + x-nullable: true + x-omitempty: true + required: ["Platform", "Containers", "Size", "UnpackedSize"] + properties: + Platform: + $ref: "#/definitions/OCIPlatform" + description: | + OCI platform of the image. This will be the platform specified in the + manifest descriptor from the index/manifest list. + If it's not available, it will be obtained from the image config. + Containers: + description: | + The IDs of the containers that are using this image. + type: "array" + items: + type: "string" + example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] + Size: + type: "object" + x-nullable: false + required: ["Unpacked"] + properties: + Unpacked: + type: "integer" + format: "int64" + example: 3987495 + description: | + Unpacked is the size (in bytes) of the locally unpacked + (uncompressed) image content that's directly usable by the containers + running this image. + It's independent of the distributable content - e.g. + the image might still have an unpacked data that's still used by + some container even when the distributable/compressed content is + already gone. + AttestationData: + description: | + The image data for the attestation manifest. + This field is only populated when Kind is "attestation". + type: "object" + x-nullable: true + x-omitempty: true + required: ["For"] + properties: + For: + description: | + The digest of the image manifest that this attestation is for. 
+ type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated . + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers. + + Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. 
However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example; + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + required: true + responses: + 201: + description: "Container created successfully" + schema: + $ref: "#/definitions/ContainerCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + 
get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerInspectResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerTopResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. + schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + $ref: "#/definitions/FilesystemChange" + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
+ + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerStatsResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-<value>` where `<value>` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
+          type: "string"
+      tags: ["Container"]
+  /containers/{id}/stop:
+    post:
+      summary: "Stop a container"
+      operationId: "ContainerStop"
+      responses:
+        204:
+          description: "no error"
+        304:
+          description: "container already stopped"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "signal"
+          in: "query"
+          description: |
+            Signal to send to the container as an integer or string (e.g. `SIGINT`).
+          type: "string"
+        - name: "t"
+          in: "query"
+          description: "Number of seconds to wait before killing the container"
+          type: "integer"
+      tags: ["Container"]
+  /containers/{id}/restart:
+    post:
+      summary: "Restart a container"
+      operationId: "ContainerRestart"
+      responses:
+        204:
+          description: "no error"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "signal"
+          in: "query"
+          description: |
+            Signal to send to the container as an integer or string (e.g. `SIGINT`).
+          type: "string"
+        - name: "t"
+          in: "query"
+          description: "Number of seconds to wait before killing the container"
+          type: "integer"
+      tags: ["Container"]
+  /containers/{id}/kill:
+    post:
+      summary: "Kill a container"
+      description: |
+        Send a POSIX signal to a container, defaulting to killing the
+        container.
+      operationId: "ContainerKill"
+      responses:
+        204:
+          description: "no error"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        409:
+          description: "container is not running"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "signal"
+          in: "query"
+          description: |
+            Signal to send to the container as an integer or string (e.g. `SIGINT`).
+          type: "string"
+          default: "SIGKILL"
+      tags: ["Container"]
+  /containers/{id}/update:
+    post:
+      summary: "Update a container"
+      description: |
+        Change various configuration options of a container without having to
+        recreate it.
+      operationId: "ContainerUpdate"
+      consumes: ["application/json"]
+      produces: ["application/json"]
+      responses:
+        200:
+          description: "The container has been updated."
+ schema: + $ref: "#/definitions/ContainerUpdateResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. 
+ + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connected is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian. + + Following the header is the payload, which is the specified number of + bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream is not multiplexed. The data exchanged over the hijacked + connection is simply the raw data from the process PTY and client's + `stdin`. 
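+
+ A minimal sketch of the demultiplexing loop described above, in Go
+ (illustrative only, not part of the official client). It assumes `conn`
+ is an `io.Reader` over the hijacked connection, positioned just after
+ the response headers; the names `conn` and `demux` are placeholders:
+
+ ```go
+ import (
+     "encoding/binary"
+     "io"
+     "os"
+ )
+
+ // demux copies multiplexed frames from conn to stdout/stderr until EOF.
+ func demux(conn io.Reader) error {
+     var header [8]byte
+     for {
+         // 1. Read the 8-byte frame header.
+         if _, err := io.ReadFull(conn, header[:]); err != nil {
+             if err == io.EOF {
+                 return nil
+             }
+             return err
+         }
+         // 2. Choose the destination from the first byte (STREAM_TYPE).
+         var dst io.Writer = os.Stdout
+         if header[0] == 2 {
+             dst = os.Stderr
+         }
+         // 3. Extract the frame size from the last four bytes (big-endian uint32).
+         size := binary.BigEndian.Uint32(header[4:8])
+         // 4. Copy exactly that many payload bytes to the chosen output.
+         if _, err := io.CopyN(dst, conn, int64(size)); err != nil {
+             return err
+         }
+         // 5. Repeat from step 1.
+     }
+ }
+ ```
+
+ When the container was created with a TTY, skip the demultiplexing step
+ and copy the raw stream directly.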
+ + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards. + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." 
+ operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64 - encoded JSON object with some filesystem header information + about the path. + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64 - encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." 
+ operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. + type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: + + - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`<image-name>[:<tag>]`) + - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `until=<timestamp>` + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. 
If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. 
Supported + standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` + type: "string" + default: "" + - name: "version" + in: "query" + type: "string" + default: "1" + enum: ["1", "2"] + description: | + Version of the builder backend to use. + + - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) + - `2` is [BuildKit](https://github.com/moby/buildkit) + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: | + Amount of disk space in bytes to keep for cache + + > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space". + > It is kept for backward compatibility and will be removed in API v1.52. 
+ type: "integer" + format: "int64" + - name: "reserved-space" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "max-used-space" + in: "query" + description: "Maximum amount of disk space allowed to keep for cache" + type: "integer" + format: "int64" + - name: "min-free-space" + in: "query" + description: "Target amount of free disk space after pruning" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=<timestamp>` remove cache older than `<timestamp>`. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. + - `id=<id>` + - `parent=<id>` + - `type=<string>` + - `description=<string>` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Pull or import an image." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: | + Name of the image to pull. If the name includes a tag or digest, specific behavior applies: + + - If only `fromImage` includes a tag, that tag is used. + - If both `fromImage` and `tag` are provided, `tag` takes precedence. + - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. + - If neither a tag nor digest is specified, all tags are pulled. + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. 
+ + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. + If the given image does exists in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/ImageInspect" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + - name: "manifests" + in: "query" + description: |- + Include Manifests in the image summary. + + The `manifests` and `platform` options are mutually exclusive, and + an error is produced if both are set. + type: "boolean" + default: false + required: false + - name: "platform" + type: "string" + in: "query" + description: |- + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show inspect. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + The `platform` and `manifests` options are mutually exclusive, and + an error is produced if both are set. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + $ref: "#/definitions/ImageHistoryResponseItem" + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show the history for. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + Name of the image to push. For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. + type: "string" + required: true + - name: "tag" + in: "query" + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. 
+ + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + - name: "platforms" + in: "query" + description: | + Select platform-specific content to delete. + Multiple values are accepted. + Each platform is a OCI platform encoded as a JSON string. + type: "array" + items: + # This should be OCIPlatform + # but $ref is not supported for array in query in Swagger 2.0 + # $ref: "#/definitions/OCIPlatform" + type: "string" + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." 
+ operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + description: | + Whether this repository has automated builds enabled. + + <p><br /></p> + + > **Deprecated**: This field is deprecated and will always be "false". + type: "boolean" + example: false + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true + is_automated: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true + is_automated: false + name: "Busybox base image." + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true + is_automated: false + name: "postgres" + star_count: 12408 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-official=(true|false)` + - `stars=<number>` Matches images that has at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." 
+ schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith <hannibal@a-team.com>`)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=<string>` config name or ID + - `container=<string>` container name or ID + - `daemon=<string>` daemon name or ID + - `event=<string>` event type + - `image=<string>` image name or ID + - `label=<string>` image or container label + - `network=<string>` network name or ID + - `node=<string>` node ID + - `plugin`=<string> plugin name or ID + - `scope`=<string> local or swarm + - `secret=<string>` secret name or ID + - `service=<string>` service name or ID + - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=<string>` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: 
"sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: "2021-07-07T22:02:32.773909517Z" + UsageCount: 26 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). + + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
+ + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be load if the image is + multi-platform. + If not provided, the full multi-platform image will be loaded. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." 
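+ # Typical flow: create the exec instance here, start it with
+ # POST /exec/{id}/start, optionally resize its TTY with POST /exec/{id}/resize,
+ # and read the ExitCode from GET /exec/{id}/json once the command exits.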
+ operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + title: "ExecConfig" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-<value>` where `<value>` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container. + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + example: false + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: true + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." 
+ x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." + examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + $ref: "#/definitions/VolumeListResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=<volume-driver-name>` Matches volumes based on their driver. 
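The exec endpoints above form a three-step flow: create an exec instance on a running container, start it, then inspect it for the exit code. A minimal sketch, assuming a running container named `my-container` (a placeholder), the default Unix socket, and `jq` for pulling the returned `Id`:

```bash
sock=/var/run/docker.sock

# 1. Create the exec instance (POST /containers/{id}/exec); 201 returns {"Id": "..."}.
exec_id=$(curl -s --unix-socket "$sock" \
  -H "Content-Type: application/json" \
  -d '{"AttachStdout": true, "AttachStderr": true, "Cmd": ["date"]}' \
  http://localhost/containers/my-container/exec | jq -r '.Id')

# 2. Start it (POST /exec/{id}/start); the response body is the command's
#    output stream (raw or multiplexed, per the produces list above).
curl -s --unix-socket "$sock" \
  -H "Content-Type: application/json" \
  -d '{"Detach": false, "Tty": false}' \
  "http://localhost/exec/${exec_id}/start" --output -

# 3. Inspect the finished instance (GET /exec/{id}/json) for its exit code.
curl -s --unix-socket "$sock" \
  "http://localhost/exec/${exec_id}/json" | jq '{Running, ExitCode}'
```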
+ - `label=<key>` or `label=<key>:<value>` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=<volume-name>` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + $ref: "#/definitions/VolumeCreateOptions" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + put: + summary: | + "Update a volume. Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." 
+ operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than + inspecting a single network. For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. 
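Taken together, the volume endpoints above cover a simple create/inspect/remove/prune lifecycle. A minimal sketch against the default socket; the volume name and label are placeholders:

```bash
sock=/var/run/docker.sock

# Create a named volume (POST /volumes/create); 201 returns the Volume object.
curl -s --unix-socket "$sock" \
  -H "Content-Type: application/json" \
  -d '{"Name": "demo-data", "Labels": {"com.example.purpose": "demo"}}' \
  http://localhost/volumes/create | jq '{Name, Mountpoint}'

# Inspect it by name (GET /volumes/{name}).
curl -s --unix-socket "$sock" http://localhost/volumes/demo-data | jq .

# Remove it (DELETE /volumes/{name}); expect 204, or 409 if still in use.
curl -s -o /dev/null -w '%{http_code}\n' --unix-socket "$sock" \
  -X DELETE http://localhost/volumes/demo-data

# Prune unused anonymous volumes (POST /volumes/prune).
curl -s -X POST --unix-socket "$sock" http://localhost/volumes/prune | jq .
```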
+ operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv4: true + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=<driver-name>` Matches a network's driver. + - `id=<network-id>` Matches all or part of a network ID. + - `label=<key>` or `label=<key>=<value>` of a network label. + - `name=<network-name>` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "Network created successfully" + schema: + $ref: "#/definitions/NetworkCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + example: "my_network" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + example: "bridge" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level). + type: "string" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + example: true + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + example: false + ConfigOnly: + description: | + Creates a config-only network. Config-only networks are placeholder + networks for network configurations to be used by other networks. + Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + example: false + ConfigFrom: + description: | + Specifies the source which will provide the configuration for + this network. 
The specified network must be an existing + config-only network; see ConfigOnly. + $ref: "#/definitions/ConfigReference" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv4: + description: "Enable IPv4 on the network." + type: "boolean" + example: true + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + example: true + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Operation forbidden" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkConnectRequest" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + MacAddress: "02:42:ac:12:05:02" + Priority: 100 + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkDisconnectRequest" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. 
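A usage sketch for the network create and connect endpoints defined above: build a user-defined bridge with a fixed subnet, then attach an existing container at a static address. The container and network names are placeholders, the subnet is arbitrary, and the calls assume the default socket:

```bash
sock=/var/run/docker.sock

# Create a user-defined bridge (POST /networks/create); the 201 response
# carries the new network's Id.
net_id=$(curl -s --unix-socket "$sock" \
  -H "Content-Type: application/json" \
  -d '{
        "Name": "app-net",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {"Driver": "default", "Config": [{"Subnet": "172.28.0.0/16"}]},
        "Labels": {"com.example.stack": "demo"}
      }' \
  http://localhost/networks/create | jq -r '.Id')

# Connect a running container with a static IPv4 address
# (POST /networks/{id}/connect).
curl -s --unix-socket "$sock" \
  -H "Content-Type: application/json" \
  -d '{
        "Container": "my-container",
        "EndpointConfig": {"IPAMConfig": {"IPv4Address": "172.28.0.10"}}
      }' \
  "http://localhost/networks/${net_id}/connect"
```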
+ tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=<capability name>` + - `enable=<true>|<false>` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
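Rounding off the plugin endpoints above before the node endpoints continue: installing a plugin is a grant-then-pull flow in which the privileges returned by `GET /plugins/privileges` are posted back as the body of `POST /plugins/pull`, after which the plugin can be enabled. A rough sketch with a placeholder plugin reference, assuming the default socket; URL-escaping of the plugin name may need extra care:

```bash
sock=/var/run/docker.sock
plugin="vieux/sshfs:latest"   # placeholder plugin reference

# 1. Fetch the privileges the plugin requires.
privs=$(curl -s --unix-socket "$sock" \
  "http://localhost/plugins/privileges?remote=${plugin}")

# 2. Pull and install it, granting those privileges (the body is a
#    PluginPrivilege array, per the parameter definition above).
curl -s --unix-socket "$sock" \
  -H "Content-Type: application/json" \
  -d "$privs" \
  "http://localhost/plugins/pull?remote=${plugin}&name=${plugin}"

# 3. Enable the installed plugin (POST /plugins/{name}/enable).
curl -s -X POST --unix-socket "$sock" \
  "http://localhost/plugins/${plugin}/enable?timeout=0"
```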
+ + Available filters: + - `id=<node id>` + - `label=<engine label>` + - `membership=`(`accepted`|`pending`)` + - `name=<node name>` + - `node.label=<node label>` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes. 
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + `<ip|interface>`), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + `<ip|interface>`), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathAddr: "192.168.1.1" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=<service id>` + - `label=<service label>` + - `mode=["replicated"|"global"]` + - `name=<service name>` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/ServiceCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + OomScoreAdj: 0 + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + OomScoreAdj: 0 + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: 
"60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=<task id>` + - `label=key` or `label="key=value"` + - `name=<task name>` + - `node=<node id or name>` + - `service=<service name>` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=<secret id>` + - `label=<key> or label=<key>=value` + - `name=<secret name>` + - `names=<secret name>` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. 
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=<config id>` + - `label=<key> or label=<key>=value` + - `name=<config name>` + - `names=<config name>` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 
404:
+ description: "no such config"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the config"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/ConfigSpec"
+ description: |
+ The spec of the config to update. Currently, only the Labels field
+ can be updated. All other fields must remain unchanged from the
+ [ConfigInspect endpoint](#operation/ConfigInspect) response values.
+ - name: "version"
+ in: "query"
+ description: |
+ The version number of the config object being updated. This is
+ required to avoid conflicting writes.
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Config"]
+ /distribution/{name}/json:
+ get:
+ summary: "Get image information from the registry"
+ description: |
+ Return image digest and platform information by contacting the registry.
+ operationId: "DistributionInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "descriptor and platform information"
+ schema:
+ $ref: "#/definitions/DistributionInspect"
+ 401:
+ description: "Failed authentication or no image found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: someimage (tag: latest)"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or id"
+ type: "string"
+ required: true
+ tags: ["Distribution"]
+ /session:
+ post:
+ summary: "Initialize interactive session"
+ description: |
+ Start a new interactive session with a server. Session allows server to
+ call back to the client for advanced capabilities.
+
+ ### Hijacking
+
+ This endpoint hijacks the HTTP connection to HTTP2 transport that allows
+ the client to expose gRPC services on that connection.
+
+ For example, the client sends this request to upgrade the connection:
+
+ ```
+ POST /session HTTP/1.1
+ Upgrade: h2c
+ Connection: Upgrade
+ ```
+
+ The Docker daemon responds with a `101 UPGRADED` response, followed by
+ the raw stream:
+
+ ```
+ HTTP/1.1 101 UPGRADED
+ Connection: Upgrade
+ Upgrade: h2c
+ ```
+ operationId: "Session"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ responses:
+ 101:
+ description: "no error, hijacking successful"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Session"]
diff --git a/_vendor/github.com/moby/moby/api/docs/v1.52.yaml b/_vendor/github.com/moby/moby/api/docs/v1.52.yaml
new file mode 100644
index 00000000000..1ef2f79ac8c
--- /dev/null
+++ b/_vendor/github.com/moby/moby/api/docs/v1.52.yaml
@@ -0,0 +1,13621 @@
+# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
+#
+# This is used for generating API documentation and the types used by the
+# client/server. See api/README.md for more information.
+#
+# Some style notes:
+# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
+# descriptions.
+# - There is no maximum line length, for ease of editing and pretty diffs.
+# - operationIds are in the format "NounVerb", with a singular noun.
+
+swagger: "2.0"
+schemes:
+ - "http"
+ - "https"
+produces:
+ - "application/json"
+ - "text/plain"
+consumes:
+ - "application/json"
+ - "text/plain"
+basePath: "/v1.52"
+info:
+ title: "Docker Engine API"
+ version: "1.52"
+ x-logo:
+ url: "https://docs.docker.com/assets/images/logo-docker-main.png"
+ description: |
+ The Engine API is an HTTP API served by Docker Engine. It is the API the
+ Docker client uses to communicate with the Engine, so everything the Docker
+ client can do can be done with the API.
+
+ Most of the client's commands map directly to API endpoints (e.g. `docker ps`
+ is `GET /containers/json`). The notable exception is running containers,
+ which consists of several API calls.
+
+ # Errors
+
+ The API uses standard HTTP status codes to indicate the success or failure
+ of the API call. The body of the response will be JSON in the following
+ format:
+
+ ```
+ {
+ "message": "page not found"
+ }
+ ```
+
+ # Versioning
+
+ The API is usually changed in each release, so API calls are versioned to
+ ensure that clients don't break. To lock to a specific version of the API,
+ you prefix the URL with its version, for example, call `/v1.30/info` to use
+ the v1.30 version of the `/info` endpoint. If the API version specified in
+ the URL is not supported by the daemon, an HTTP `400 Bad Request` error message
+ is returned.
+
+ If you omit the version-prefix, the current version of the API (v1.52) is used.
+ For example, calling `/info` is the same as calling `/v1.52/info`. Using the
+ API without a version-prefix is deprecated and will be removed in a future release.
+
+ Engine releases in the near future should support this version of the API,
+ so your client will continue to work even if it is talking to a newer Engine.
+
+ The API uses an open schema model, which means the server may add extra properties
+ to responses. Likewise, the server will ignore any extra query parameters and
+ request body properties. When you write clients, you need to ignore additional
+ properties in responses to ensure they do not break when talking to newer
+ daemons.
+
+
+ # Authentication
+
+ Authentication for registries is handled client side. The client has to send
+ authentication details to various endpoints that need to communicate with
+ registries, such as `POST /images/(name)/push`. These are sent as
+ `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5)
+ (JSON) string with the following structure:
+
+ ```
+ {
+ "username": "string",
+ "password": "string",
+ "serveraddress": "string"
+ }
+ ```
+
+ The `serveraddress` is a domain/IP without a protocol. Throughout this
+ structure, double quotes are required.
+
+ If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth),
+ you can just pass this instead of credentials:
+
+ ```
+ {
+ "identitytoken": "9cbaf023786cd7..."
+ }
+ ```
+
+# The tags on paths define the menu sections in the ReDoc documentation, so
+# the usage of tags must make sense for that:
+# - They should be singular, not plural.
+# - There should not be too many tags, or the menu becomes unwieldy. For
+# example, it is preferable to add a path to the "System" tag instead of
+# creating a tag with a single path in it.
+# - The order of tags in this list defines the order in the menu.
+tags:
+ # Primary objects
+ - name: "Container"
+ x-displayName: "Containers"
+ description: |
+ Create and manage containers.
+ - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + PortSummary: + type: "object" + description: | + Describes a port-mapping between the container and the host. + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + x-go-type: + type: Addr + import: + package: net/netip + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. 
+ - `cluster` a Swarm cluster volume. + - `image` an OCI image. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "image" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `image` an OCI image. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. + type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. + type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. + type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). + type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + description: | + The name of the device driver to use for this request. + + Note that if this is specified the capabilities are ignored when + selecting a device driver. + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + + Note that if a driver is specified the capabilities have no effect on + selecting a driver as the driver name is used directly. 
+ + Note that if no driver is specified the capabilities are used to + select a driver with the required capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `cluster` a Swarm cluster volume + - `image` Mounts an image. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + allOf: + - $ref: "#/definitions/MountType" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to `true` in conjunction). + + Added in v1.44, before that version all read-only mounts were + non-recursive by default. To match the previous behaviour this + will default to `true` for clients on versions prior to v1.44. + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + Subpath: + description: "Source path inside the volume. Must be relative without any back traversals." + type: "string" + example: "dir-inside-volume/subdirectory" + ImageOptions: + description: "Optional configuration for the `image` type." + type: "object" + properties: + Subpath: + description: "Source path inside the image. Must be relative without any back traversals." + type: "string" + example: "dir-inside-image/subdirectory" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). + type: "integer" + Options: + description: | + The options to be passed to the tmpfs mount. An array of arrays. + Flag options should be provided as 1-length arrays. Other types + should be provided as as 2-length arrays, where the first item is + the key and the second the value. + type: "array" + items: + type: "array" + minItems: 1 + maxItems: 2 + items: + type: "string" + example: + [["noexec"]] + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." 
+ type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. + type: "integer" + format: "int64" + MemorySwappiness: + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCpus: + description: "CPU quota in units of 10<sup>-9</sup> CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. 
+ type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. + type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. 
It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. 
+ + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + example: "" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + description: |- + Name of the logging driver used for the container or "none" + if logging is disabled. + type: "string" + enum: + - "local" + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + description: |- + Driver-specific configuration options for the logging driver. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "5" + "max-size": "10m" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:<name|id>`. Any + other value is taken as a custom network's name to which this + container should connect to. + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `<container name>[:<ro|rw>]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. 
+ x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + Annotations: + type: "object" + description: | + Arbitrary non-identifying metadata attached to container and + provided to the runtime when the container is started. + additionalProperties: + type: "string" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + format: "ip-address" + x-go-type: + type: Addr + import: + package: net/netip + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:<name|id>"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:<name|id>"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: |- + Gives the container full access to the host. + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. 
+ + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. + ShmSize: + type: "integer" + format: "int64" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + x-nullable: true + description: |- + A list of kernel parameters (sysctls) to set in the container. + + This field is omitted if not set. + additionalProperties: + type: "string" + example: + "net.ipv4.ip_forward": "1" + Runtime: + type: "string" + x-nullable: true + description: |- + Runtime to use with this container. + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`<user-name|UID>[<:group-name|GID>]`). + type: "string" + example: "123:456" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." 
+ type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + ImageConfig: + description: | + Configuration of the image. These fields are used as defaults + when starting a container from the image. + type: "object" + properties: + User: + description: "The user that commands are run as inside the container." 
+ type: "string" + example: "web:web" + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: + "/app/data": {} + "/app/config": {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. 
+ EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + SandboxKey: + description: SandboxKey is the full path of the netns handle + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + Ports: + $ref: "#/definitions/PortMap" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `<port>/<protocol>`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + x-nullable: true + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + x-go-type: + type: Addr + import: + package: net/netip + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" + + DriverData: + description: | + Information about the storage driver used to store the container's and + image's filesystem. + type: "object" + required: [Name, Data] + properties: + Name: + description: "Name of the storage driver." + type: "string" + x-nullable: false + example: "overlay2" + Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific, and depends on the storage-driver + in use, and should be used for informational purposes only. + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } + + Storage: + description: | + Information about the storage used by the container. + type: "object" + properties: + RootFS: + description: | + Information about the storage used for the container's root filesystem. 
+ type: "object" + x-nullable: true + $ref: "#/definitions/RootFSStorage" + + RootFSStorage: + description: | + Information about the storage used for the container's root filesystem. + type: "object" + x-go-name: RootFSStorage + properties: + Snapshot: + description: | + Information about the snapshot used for the container's root filesystem. + type: "object" + x-nullable: true + $ref: "#/definitions/RootFSStorageSnapshot" + + RootFSStorageSnapshot: + description: | + Information about a snapshot backend of the container's root filesystem. + type: "object" + x-go-name: RootFSStorageSnapshot + properties: + Name: + description: "Name of the snapshotter." + type: "string" + x-nullable: false + + FilesystemChange: + description: | + Change in the container's filesystem. + type: "object" + required: [Path, Kind] + properties: + Path: + description: | + Path to file or directory that has changed. + type: "string" + x-nullable: false + Kind: + $ref: "#/definitions/ChangeType" + + ChangeType: + description: | + Kind of change + + Can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + + ImageInspect: + description: | + Information about an image in the local image cache. + type: "object" + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + Manifests: + description: | + Manifests is a list of image manifests available in this image. It + provides a more detailed view of the platform-specific image manifests or + other image-attached data like build attestations. + + Only available if the daemon provides a multi-platform image store + and the `manifests` option is set in the inspect request. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: true + items: + $ref: "#/definitions/ImageManifestSummary" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. 
+ + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Comment: + description: | + Optional message that was set when committing or importing the image. + type: "string" + x-nullable: true + example: "" + Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if present in the image, + and omitted otherwise. + type: "string" + format: "dateTime" + x-nullable: true + example: "2022-02-04T21:20:12.497794809Z" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: true + example: "" + Config: + $ref: "#/definitions/ImageConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. + type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + GraphDriver: + x-nullable: true + $ref: "#/definitions/DriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + + ImageSummary: + type: "object" + x-go-name: "Summary" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). 
+
+ Note that this digest differs from the `RepoDigests` below, which
+ holds digests of image manifests that reference the image.
+ type: "string"
+ x-nullable: false
+ example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
+ ParentId:
+ description: |
+ ID of the parent image.
+
+ Depending on how the image was created, this field may be empty and
+ is only set for images that were built/created locally. This field
+ is empty if the image was pulled from an image registry.
+ type: "string"
+ x-nullable: false
+ example: ""
+ RepoTags:
+ description: |
+ List of image names/tags in the local image cache that reference this
+ image.
+
+ Multiple image tags can refer to the same image, and this list may be
+ empty if no tags reference the image, in which case the image is
+ "untagged". An untagged image can still be referenced by its ID.
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ example:
+ - "example:1.0"
+ - "example:latest"
+ - "example:stable"
+ - "internal.registry.example.com:5000/example:1.0"
+ RepoDigests:
+ description: |
+ List of content-addressable digests of locally available image manifests
+ that the image is referenced from. Multiple manifests can refer to the
+ same image.
+
+ These digests are usually only available if the image was either pulled
+ from a registry, or if the image was pushed to a registry, which is when
+ the manifest is generated and its digest calculated.
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ example:
+ - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+ - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ Created:
+ description: |
+ Date and time at which the image was created as a Unix timestamp
+ (number of seconds since EPOCH).
+ type: "integer"
+ x-nullable: false
+ example: "1644009612"
+ Size:
+ description: |
+ Total size of the image including all layers it is composed of.
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ example: 172064416
+ SharedSize:
+ description: |
+ Total size of image layers that are shared between this image and other
+ images.
+
+ This size is not calculated by default. `-1` indicates that the value
+ has not been set / calculated.
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ example: 1239828
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Containers:
+ description: |
+ Number of containers using this image. Includes both stopped and running
+ containers.
+
+ `-1` indicates that the value has not been set / calculated.
+ x-nullable: false
+ type: "integer"
+ example: 2
+ Manifests:
+ description: |
+ Manifests is a list of manifests available in this image.
+ It provides a more detailed view of the platform-specific image manifests
+ or other image-attached data like build attestations.
+
+ WARNING: This is experimental and may change at any time without any backward
+ compatibility.
+ type: "array"
+ x-nullable: false
+ x-omitempty: true
+ items:
+ $ref: "#/definitions/ImageManifestSummary"
+ Descriptor:
+ description: |
+ Descriptor is an OCI descriptor of the image target.
+ In case of a multi-platform image, this descriptor points to the OCI index
+ or a manifest list.
+ + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + + ImagesDiskUsage: + type: "object" + x-go-name: "DiskUsage" + x-go-package: "github.com/moby/moby/api/types/image" + description: | + represents system data usage for image resources. + properties: + ActiveCount: + description: | + Count of active images. + type: "integer" + format: "int64" + example: 1 + TotalCount: + description: | + Count of all images. + type: "integer" + format: "int64" + example: 4 + Reclaimable: + description: | + Disk space that can be reclaimed by removing unused images. + type: "integer" + format: "int64" + example: 12345678 + TotalSize: + description: | + Disk space in use by images. + type: "integer" + format: "int64" + example: 98765432 + Items: + description: | + List of image summaries. + type: "array" + x-omitempty: true + items: + x-go-type: + type: Summary + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + AuthResponse: + description: | + An identity token was generated successfully. + type: "object" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + example: "Login Succeeded" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + example: "9cbaf023786cd7..." + x-nullable: false + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + x-nullable: false + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + example: "tardis" + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + example: "custom" + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + example: "/var/lib/docker/volumes/tardis" + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. 
+ default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumesDiskUsage: + type: "object" + x-go-name: "DiskUsage" + x-go-package: "github.com/moby/moby/api/types/volume" + description: | + represents system data usage for volume resources. + properties: + ActiveCount: + description: | + Count of active volumes. + type: "integer" + format: "int64" + example: 1 + TotalCount: + description: | + Count of all volumes. + type: "integer" + format: "int64" + example: 4 + Reclaimable: + description: | + Disk space that can be reclaimed by removing inactive volumes. + type: "integer" + format: "int64" + example: 12345678 + TotalSize: + description: | + Disk space in use by volumes. + type: "integer" + format: "int64" + example: 98765432 + Items: + description: | + List of volumes. + type: "array" + x-omitempty: true + items: + x-go-type: + type: Volume + + VolumeCreateRequest: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateRequest" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + example: [] + + Network: + type: "object" + properties: + Name: + description: | + Name of the network. 
+ type: "string" + example: "my_network" + x-omitempty: false + Id: + description: | + ID that uniquely identifies a network on a single machine. + type: "string" + x-go-name: "ID" + x-omitempty: false + example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: + description: | + Date and time at which the network was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-omitempty: false + x-go-type: + type: Time + import: + package: time + hints: + nullable: false + example: "2016-10-19T04:33:30.360899459Z" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level) + type: "string" + x-omitempty: false + example: "local" + Driver: + description: | + The name of the driver used to create the network (e.g. `bridge`, + `overlay`). + type: "string" + x-omitempty: false + example: "overlay" + EnableIPv4: + description: | + Whether the network was created with IPv4 enabled. + type: "boolean" + x-omitempty: false + example: true + EnableIPv6: + description: | + Whether the network was created with IPv6 enabled. + type: "boolean" + x-omitempty: false + example: false + IPAM: + description: | + The network's IP Address Management. + $ref: "#/definitions/IPAM" + x-nullable: false + x-omitempty: false + Internal: + description: | + Whether the network is created to only allow internal networking + connectivity. + type: "boolean" + x-nullable: false + x-omitempty: false + default: false + example: false + Attachable: + description: | + Whether a global / swarm scope network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + x-nullable: false + x-omitempty: false + default: false + example: false + Ingress: + description: | + Whether the network is providing the routing-mesh for the swarm cluster. + type: "boolean" + x-nullable: false + x-omitempty: false + default: false + example: false + ConfigFrom: + $ref: "#/definitions/ConfigReference" + x-nullable: false + x-omitempty: false + ConfigOnly: + description: | + Whether the network is a config-only network. Config-only networks are + placeholder networks for network configurations to be used by other + networks. Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + x-omitempty: false + x-nullable: false + default: false + Options: + description: | + Network-specific options uses when creating the network. + type: "object" + x-omitempty: false + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: | + Metadata specific to the network being created. + type: "object" + x-omitempty: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Peers: + description: | + List of peer nodes for an overlay network. This field is only present + for overlay networks, and omitted for other network types. 
+ type: "array" + x-omitempty: true + items: + $ref: "#/definitions/PeerInfo" + + NetworkSummary: + description: "Network list response item" + x-go-name: Summary + type: "object" + allOf: + - $ref: "#/definitions/Network" + + NetworkInspect: + description: 'The body of the "get network" http response message.' + x-go-name: Inspect + type: "object" + allOf: + - $ref: "#/definitions/Network" + properties: + Containers: + description: | + Contains endpoints attached to the network. + type: "object" + x-omitempty: false + additionalProperties: + $ref: "#/definitions/EndpointResource" + example: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Services: + description: | + List of services using the network. This field is only present for + swarm scope networks, and omitted for local scope networks. + type: "object" + x-omitempty: true + additionalProperties: + x-go-type: + type: ServiceInfo + hints: + nullable: false + Status: + description: > + provides runtime information about the network + such as the number of allocated IPs. + $ref: "#/definitions/NetworkStatus" + + NetworkStatus: + description: > + provides runtime information about the network + such as the number of allocated IPs. + type: "object" + x-go-name: Status + properties: + IPAM: + $ref: "#/definitions/IPAMStatus" + + ServiceInfo: + x-nullable: false + x-omitempty: false + description: > + represents service parameters with the list of service's tasks + type: "object" + properties: + VIP: + type: "string" + x-omitempty: false + x-go-type: + type: Addr + import: + package: net/netip + Ports: + type: "array" + x-omitempty: false + items: + type: "string" + LocalLBIndex: + type: "integer" + format: "int" + x-omitempty: false + x-go-type: + type: int + Tasks: + type: "array" + x-omitempty: false + items: + $ref: "#/definitions/NetworkTaskInfo" + + NetworkTaskInfo: + x-nullable: false + x-omitempty: false + x-go-name: Task + description: > + carries the information about one backend task + type: "object" + properties: + Name: + type: "string" + x-omitempty: false + EndpointID: + type: "string" + x-omitempty: false + EndpointIP: + type: "string" + x-omitempty: false + x-go-type: + type: Addr + import: + package: net/netip + Info: + type: "object" + x-omitempty: false + additionalProperties: + type: "string" + + ConfigReference: + x-nullable: false + x-omitempty: false + description: | + The config-only network source to provide the configuration for + this network. + type: "object" + properties: + Network: + description: | + The name of the config-only network that provides the network's + configuration. The specified network must be an existing config-only + network. Only network names are allowed, not network IDs. + type: "string" + x-omitempty: false + example: "config_only_network_01" + + IPAM: + type: "object" + x-nullable: false + x-omitempty: false + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + example: "default" + Config: + description: | + List of IPAM configuration options, specified as a map: + + ``` + {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} + ``` + type: "array" + items: + $ref: "#/definitions/IPAMConfig" + Options: + description: "Driver-specific options, specified as a map." 
+ type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + example: "172.20.0.0/16" + IPRange: + type: "string" + example: "172.20.10.0/24" + Gateway: + type: "string" + example: "172.20.10.11" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + + IPAMStatus: + type: "object" + x-nullable: false + x-omitempty: false + properties: + Subnets: + type: "object" + additionalProperties: + $ref: "#/definitions/SubnetStatus" + example: + "172.16.0.0/16": + IPsInUse: 3 + DynamicIPsAvailable: 65533 + "2001:db8:abcd:0012::0/96": + IPsInUse: 5 + DynamicIPsAvailable: 4294967291 + x-go-type: + type: SubnetStatuses + kind: map + + SubnetStatus: + type: "object" + x-nullable: false + x-omitempty: false + properties: + IPsInUse: + description: > + Number of IP addresses in the subnet that are in use or reserved and + are therefore unavailable for allocation, saturating at 2<sup>64</sup> - 1. + type: integer + format: uint64 + x-omitempty: false + DynamicIPsAvailable: + description: > + Number of IP addresses within the network's IPRange for the subnet + that are available for allocation, saturating at 2<sup>64</sup> - 1. + type: integer + format: uint64 + x-omitempty: false + + EndpointResource: + type: "object" + description: > + contains network resources allocated and used for a + container in a network. + properties: + Name: + type: "string" + x-omitempty: false + example: "container_1" + EndpointID: + type: "string" + x-omitempty: false + example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: + type: "string" + x-omitempty: false + example: "02:42:ac:13:00:02" + x-go-type: + type: HardwareAddr + IPv4Address: + type: "string" + x-omitempty: false + example: "172.19.0.2/16" + x-go-type: + type: Prefix + import: + package: net/netip + IPv6Address: + type: "string" + x-omitempty: false + example: "" + x-go-type: + type: Prefix + import: + package: net/netip + + PeerInfo: + description: > + represents one peer of an overlay network. + type: "object" + x-nullable: false + properties: + Name: + description: + ID of the peer-node in the Swarm cluster. + type: "string" + x-omitempty: false + example: "6869d7c1732b" + IP: + description: + IP-address of the peer-node in the Swarm cluster. + type: "string" + x-omitempty: false + example: "10.133.77.91" + x-go-type: + type: Addr + import: + package: net/netip + + NetworkCreateResponse: + description: "OK response to NetworkCreate operation" + type: "object" + title: "NetworkCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warning] + properties: + Id: + description: "The ID of the created network." + type: "string" + x-nullable: false + example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" + Warning: + description: "Warnings encountered when creating the container" + type: "string" + x-nullable: false + example: "" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + description: | + BuildCache contains information about a build cache record. + properties: + ID: + type: "string" + description: | + Unique ID of the build cache record. 
+ example: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] + Type: + type: "string" + description: | + Cache record type. + example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + BuildCacheDiskUsage: + type: "object" + x-go-name: "DiskUsage" + x-go-package: "github.com/moby/moby/api/types/build" + description: | + represents system data usage for build cache resources. + properties: + ActiveCount: + description: | + Count of active build cache records. + type: "integer" + format: "int64" + example: 1 + TotalCount: + description: | + Count of all build cache records. + type: "integer" + format: "int64" + example: 4 + Reclaimable: + description: | + Disk space that can be reclaimed by removing inactive build cache records. + type: "integer" + format: "int64" + example: 12345678 + TotalSize: + description: | + Disk space in use by build cache records. + type: "integer" + format: "int64" + example: 98765432 + Items: + description: | + List of build cache records. + type: "array" + x-omitempty: true + items: + x-go-type: + type: CacheRecord + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + DeviceInfo: + type: "object" + description: | + DeviceInfo represents a device that can be used by a container. + properties: + Source: + type: "string" + example: "cdi" + description: | + The origin device driver. + ID: + type: "string" + example: "vendor.com/gpu=0" + description: | + The unique identifier for the device within its source driver. + For CDI devices, this would be an FQDN like "vendor.com/gpu=0". 
+ + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IDResponse: + description: "Response to an API call that returns just an Id" + type: "object" + x-go-name: "IDResponse" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + NetworkConnectRequest: + description: | + NetworkConnectRequest represents the data to be used to connect a container to a network. + type: "object" + x-go-name: "ConnectRequest" + required: ["Container"] + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + x-nullable: false + example: "3613f73ba0e4" + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + x-nullable: true + + NetworkDisconnectRequest: + description: | + NetworkDisconnectRequest represents the data to be used to disconnect a container from a network. + type: "object" + x-go-name: "DisconnectRequest" + required: ["Container"] + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + x-nullable: false + example: "3613f73ba0e4" + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + default: false + x-nullable: false + x-omitempty: false + example: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + MacAddress: + description: | + MAC address for the endpoint on this network. The network driver might ignore this parameter. + type: "string" + example: "02:42:ac:11:00:04" + x-go-type: + type: HardwareAddr + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + GwPriority: + description: | + This property determines which endpoint will provide the default + gateway for a container. The endpoint with the highest priority will + be used. If multiple endpoints have the same priority, endpoints are + lexicographically sorted based on their network name, and the one + that sorts first is picked. + type: "integer" + format: "int64" + example: + - 10 + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. 
+ type: "string" + example: "172.17.0.4" + x-go-type: + type: Addr + import: + package: net/netip + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + x-go-type: + type: Addr + import: + package: net/netip + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + x-go-type: + type: Addr + import: + package: net/netip + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + DNSNames: + description: | + List of all DNS names an endpoint has on a specific network. This + list is based on the container name, network aliases, container short + ID, and hostname. + + These DNS names are non-fully qualified but can contain several dots. + You can get fully qualified DNS names by appending `.<network-name>`. + For instance, if container name is `my.ctr` and the network is named + `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be + `my.ctr.testnet`. + type: array + items: + type: string + example: ["foobar", "server_x", "server_y", "my.ctr"] + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + x-go-type: + type: Addr + import: + package: net/netip + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + x-go-type: + type: Addr + import: + package: net/netip + LinkLocalIPs: + type: "array" + items: + type: "string" + x-go-type: + type: Addr + import: + package: net/netip + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-go-name: "Mount" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + x-go-name: "Device" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-go-name: "Env" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. 
+ type: "object" + x-go-name: "Privilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + + Plugin: + description: "A plugin for the Engine API" + type: "object" + x-go-name: "Plugin" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "user-configurable settings for the plugin." + type: "object" + x-go-name: "Settings" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-go-name: "PluginReference" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." + type: "object" + x-go-name: "Config" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + x-go-name: "Interface" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + type: "string" + x-go-type: + type: "CapabilityID" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." 
+ enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-go-name: "User" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-go-name: "NetworkConfig" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-go-name: "LinuxConfig" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-go-name: "Args" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + x-go-name: "RootFS" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." 
+ type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." + type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. 
+ type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. 
+ + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. + type: "string" + SigningCAKey: + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. + type: "string" + ForceRotate: + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey` + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: | + If set, generate a key and use it to lock data stored on the + managers. + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selected log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. 
+ ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + + <p><br /></p> + + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + + <p><br /></p> + + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. 
PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + + <p><br /></p> + + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + + <p><br /></p> + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + + <p><br /></p> + + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. 
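+ # Illustrative example (not part of the spec): exactly one of the three
+ # CredentialSpec sources above may be set. A ContainerSpec.Privileges fragment
+ # that loads the credential spec from a Swarm Config could look like this
+ # (the ID is a placeholder):
+ #
+ #   "Privileges": {
+ #     "CredentialSpec": { "Config": "0bt9dmxjvjiqermk6xrop3ekq" }
+ #   }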
+ SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. The + secret in the reference will be identified by its ID. 
+
+ type: "string"
+ OomScoreAdj:
+ type: "integer"
+ format: "int64"
+ description: |
+ An integer value containing the score given to the container in
+ order to tune OOM killer preferences.
+ example: 0
+ Configs:
+ description: |
+ Configs contains references to zero or more configs that will be
+ exposed to the service.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ File:
+ description: |
+ File represents a specific target that is backed by a file.
+
+ <p><br /></p>
+
+ > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive.
+ type: "object"
+ properties:
+ Name:
+ description: |
+ Name represents the final filename in the filesystem.
+ type: "string"
+ UID:
+ description: "UID represents the file UID."
+ type: "string"
+ GID:
+ description: "GID represents the file GID."
+ type: "string"
+ Mode:
+ description: "Mode represents the FileMode of the file."
+ type: "integer"
+ format: "uint32"
+ Runtime:
+ description: |
+ Runtime represents a target that is not mounted into the
+ container but is used by the task.
+
+ <p><br /></p>
+
+ > **Note**: `Configs.File` and `Configs.Runtime` are mutually
+ > exclusive.
+ type: "object"
+ ConfigID:
+ description: |
+ ConfigID represents the ID of the specific config that we're
+ referencing.
+ type: "string"
+ ConfigName:
+ description: |
+ ConfigName is the name of the config that this references,
+ but this is just provided for lookup/display purposes. The
+ config in the reference will be identified by its ID.
+ type: "string"
+ Isolation:
+ type: "string"
+ description: |
+ Isolation technology of the containers running the service.
+ (Windows only)
+ enum:
+ - "default"
+ - "process"
+ - "hyperv"
+ - ""
+ Init:
+ description: |
+ Run an init inside the container that forwards signals and reaps
+ processes. This field is omitted if empty, and the default (as
+ configured on the daemon) is used.
+ type: "boolean"
+ x-nullable: true
+ Sysctls:
+ description: |
+ Set kernel namespaced parameters (sysctls) in the container.
+ The Sysctls option on services accepts the same sysctls as are
+ supported on containers. Note that while the same sysctls are
+ supported, no guarantees or checks are made about their
+ suitability for a clustered environment, and it's up to the user
+ to determine whether a given sysctl will work properly in a
+ Service.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ # This option is not used by Windows containers
+ CapabilityAdd:
+ type: "array"
+ description: |
+ A list of kernel capabilities to add to the default set
+ for the container.
+ items:
+ type: "string"
+ example:
+ - "CAP_NET_RAW"
+ - "CAP_SYS_ADMIN"
+ - "CAP_SYS_CHROOT"
+ - "CAP_SYSLOG"
+ CapabilityDrop:
+ type: "array"
+ description: |
+ A list of kernel capabilities to drop from the default set
+ for the container.
+ items:
+ type: "string"
+ example:
+ - "CAP_NET_RAW"
+ Ulimits:
+ description: |
+ A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Name:
+ description: "Name of ulimit"
+ type: "string"
+ Soft:
+ description: "Soft limit"
+ type: "integer"
+ Hard:
+ description: "Hard limit"
+ type: "integer"
+ NetworkAttachmentSpec:
+ description: |
+ Read-only spec type for non-swarm containers attached to swarm overlay
+ networks.
+
+ <p><br /></p>
+
+ > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+ > mutually exclusive.
+ > PluginSpec is only used when the Runtime field
+ > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+ > field is set to `attachment`.
+ type: "object"
+ properties:
+ ContainerID:
+ description: "ID of the container represented by this task"
+ type: "string"
+ Resources:
+ description: |
+ Resource requirements which apply to each individual container created
+ as part of the service.
+ type: "object"
+ properties:
+ Limits:
+ description: "Define resources limits."
+ $ref: "#/definitions/Limit"
+ Reservations:
+ description: "Define resources reservation."
+ $ref: "#/definitions/ResourceObject"
+ SwapBytes:
+ description: |
+ Amount of swap in bytes - can only be used together with a memory limit.
+ If not specified, the default behaviour is to grant a swap space twice
+ as big as the memory limit.
+ Set to -1 to enable unlimited swap.
+ type: "integer"
+ format: "int64"
+ minimum: -1
+ x-nullable: true
+ x-omitempty: true
+ MemorySwappiness:
+ description: |
+ Tune the service's containers' memory swappiness (0 to 100).
+ If not specified, defaults to the containers' OS' default, generally 60,
+ or whatever value was predefined in the image.
+ Set to -1 to unset a previously set value.
+ type: "integer"
+ format: "int64"
+ minimum: -1
+ maximum: 100
+ x-nullable: true
+ x-omitempty: true
+ RestartPolicy:
+ description: |
+ Specification for the restart policy which applies to containers
+ created as part of this service.
+ type: "object"
+ properties:
+ Condition:
+ description: "Condition for restart."
+ type: "string"
+ enum:
+ - "none"
+ - "on-failure"
+ - "any"
+ Delay:
+ description: "Delay between restart attempts."
+ type: "integer"
+ format: "int64"
+ MaxAttempts:
+ description: |
+ Maximum attempts to restart a given container before giving up
+ (default value is 0, which is ignored).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Window:
+ description: |
+ The time window used to evaluate the restart policy
+ (default value is 0, which is unbounded).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Placement:
+ type: "object"
+ properties:
+ Constraints:
+ description: |
+ An array of constraint expressions to limit the set of nodes where
+ a task can be scheduled. Constraint expressions can either use a
+ _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
+ nodes that satisfy every expression (AND match). Constraints can
+ match node or Docker Engine labels as follows:
+
+ node attribute | matches | example
+ ---------------------|--------------------------------|-----------------------------------------------
+ `node.id` | Node ID | `node.id==2ivku8v2gvtg4`
+ `node.hostname` | Node hostname | `node.hostname!=node-2`
+ `node.role` | Node role (`manager`/`worker`) | `node.role==manager`
+ `node.platform.os` | Node operating system | `node.platform.os==windows`
+ `node.platform.arch` | Node architecture | `node.platform.arch==x86_64`
+ `node.labels` | User-defined node labels | `node.labels.security==high`
+ `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04`
+
+ `engine.labels` apply to Docker Engine labels like operating system,
+ drivers, etc. Swarm administrators add `node.labels` for operational
+ purposes by using the [`node update endpoint`](#operation/NodeUpdate).
+
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "node.hostname!=node3.corp.example.com"
+ - "node.role!=manager"
+ - "node.labels.type==production"
+ - "node.platform.os==linux"
+ - "node.platform.arch==x86_64"
+ Preferences:
+ description: |
+ Preferences provide a way to make the scheduler aware of factors
+ such as topology. They are provided in order from highest to
+ lowest precedence.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Spread:
+ type: "object"
+ properties:
+ SpreadDescriptor:
+ description: |
+ label descriptor, such as `engine.labels.az`.
+ type: "string"
+ example:
+ - Spread:
+ SpreadDescriptor: "node.labels.datacenter"
+ - Spread:
+ SpreadDescriptor: "node.labels.rack"
+ MaxReplicas:
+ description: |
+ Maximum number of replicas per node (default value is 0, which
+ is unlimited).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Platforms:
+ description: |
+ Platforms stores all the platforms that the service's image can
+ run on. This field is used in the platform filter for scheduling.
+ If empty, then the platform filter is off, meaning there are no
+ scheduling restrictions.
+ type: "array"
+ items:
+ $ref: "#/definitions/Platform"
+ ForceUpdate:
+ description: |
+ A counter that triggers an update even if no relevant parameters have
+ been changed.
+ type: "integer"
+ format: "uint64"
+ Runtime:
+ description: |
+ Runtime is the type of runtime specified for the task executor.
+ type: "string"
+ Networks:
+ description: "Specifies which networks the service should attach to."
+ type: "array"
+ items:
+ $ref: "#/definitions/NetworkAttachmentConfig"
+ LogDriver:
+ description: |
+ Specifies the log driver to use for tasks created from this spec. If
+ not present, the default one for the swarm will be used, finally
+ falling back to the engine default if not specified.
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Options:
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ TaskState:
+ type: "string"
+ enum:
+ - "new"
+ - "allocated"
+ - "pending"
+ - "assigned"
+ - "accepted"
+ - "preparing"
+ - "ready"
+ - "starting"
+ - "running"
+ - "complete"
+ - "shutdown"
+ - "failed"
+ - "rejected"
+ - "remove"
+ - "orphaned"
+
+ ContainerStatus:
+ type: "object"
+ description: "represents the status of a container."
+ properties:
+ ContainerID:
+ type: "string"
+ PID:
+ type: "integer"
+ ExitCode:
+ type: "integer"
+
+ PortStatus:
+ type: "object"
+ description: "represents the port status of a task's host ports whose service has published host ports"
+ properties:
+ Ports:
+ type: "array"
+ items:
+ $ref: "#/definitions/EndpointPortConfig"
+
+ TaskStatus:
+ type: "object"
+ description: "represents the status of a task."
+ properties:
+ Timestamp:
+ type: "string"
+ format: "dateTime"
+ State:
+ $ref: "#/definitions/TaskState"
+ Message:
+ type: "string"
+ Err:
+ type: "string"
+ ContainerStatus:
+ $ref: "#/definitions/ContainerStatus"
+ PortStatus:
+ $ref: "#/definitions/PortStatus"
+
+ Task:
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the task."
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Name:
+ description: "Name of the task."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Spec:
+ $ref: "#/definitions/TaskSpec"
+ ServiceID:
+ description: "The ID of the service this task is part of."
+ type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + $ref: "#/definitions/TaskStatus" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service. + $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." 
+ type: "integer"
+ format: "int64"
+ FailureAction:
+ description: |
+ Action to take if an updated task fails to run, or stops running
+ during the update.
+ type: "string"
+ enum:
+ - "continue"
+ - "pause"
+ - "rollback"
+ Monitor:
+ description: |
+ Amount of time to monitor each updated task for failures, in
+ nanoseconds.
+ type: "integer"
+ format: "int64"
+ MaxFailureRatio:
+ description: |
+ The fraction of tasks that may fail during an update before the
+ failure action is invoked, specified as a floating point number
+ between 0 and 1.
+ type: "number"
+ default: 0
+ Order:
+ description: |
+ The order of operations when rolling out an updated task. Either
+ the old task is shut down before the new task is started, or the
+ new task is started before the old task is shut down.
+ type: "string"
+ enum:
+ - "stop-first"
+ - "start-first"
+ RollbackConfig:
+ description: "Specification for the rollback strategy of the service."
+ type: "object"
+ properties:
+ Parallelism:
+ description: |
+ Maximum number of tasks to be rolled back in one iteration (0 means
+ unlimited parallelism).
+ type: "integer"
+ format: "int64"
+ Delay:
+ description: |
+ Amount of time between rollback iterations, in nanoseconds.
+ type: "integer"
+ format: "int64"
+ FailureAction:
+ description: |
+ Action to take if a rolled back task fails to run, or stops
+ running during the rollback.
+ type: "string"
+ enum:
+ - "continue"
+ - "pause"
+ Monitor:
+ description: |
+ Amount of time to monitor each rolled back task for failures, in
+ nanoseconds.
+ type: "integer"
+ format: "int64"
+ MaxFailureRatio:
+ description: |
+ The fraction of tasks that may fail during a rollback before the
+ failure action is invoked, specified as a floating point number
+ between 0 and 1.
+ type: "number"
+ default: 0
+ Order:
+ description: |
+ The order of operations when rolling back a task. Either the old
+ task is shut down before the new task is started, or the new task
+ is started before the old task is shut down.
+ type: "string"
+ enum:
+ - "stop-first"
+ - "start-first"
+ Networks:
+ description: |
+ Specifies which networks the service should attach to.
+
+ Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead.
+ type: "array"
+ items:
+ $ref: "#/definitions/NetworkAttachmentConfig"
+
+ EndpointSpec:
+ $ref: "#/definitions/EndpointSpec"
+
+ EndpointPortConfig:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Protocol:
+ type: "string"
+ enum:
+ - "tcp"
+ - "udp"
+ - "sctp"
+ TargetPort:
+ description: "The port inside the container."
+ type: "integer"
+ PublishedPort:
+ description: "The port on the swarm hosts."
+ type: "integer"
+ PublishMode:
+ description: |
+ The mode in which the port is published.
+
+ <p><br /></p>
+
+ - "ingress" makes the target port accessible on every node,
+ regardless of whether there is a task for the service running on
+ that node or not.
+ - "host" bypasses the routing mesh and publishes the port directly on
+ the swarm node where that service is running.
+
+ type: "string"
+ enum:
+ - "ingress"
+ - "host"
+ default: "ingress"
+ example: "ingress"
+
+ EndpointSpec:
+ description: "Properties that can be configured to access and load balance a service."
+ type: "object"
+ properties:
+ Mode:
+ description: |
+ The mode of resolution to use for internal load balancing between tasks.
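+ # Illustrative example (not part of the spec): a minimal EndpointSpec fragment
+ # for a service create or update request, combining the `Mode` field defined
+ # here with the port fields defined by EndpointPortConfig above (port numbers
+ # are placeholders):
+ #
+ #   "EndpointSpec": {
+ #     "Mode": "vip",
+ #     "Ports": [
+ #       { "Protocol": "tcp", "TargetPort": 6379, "PublishedPort": 30001, "PublishMode": "ingress" }
+ #     ]
+ #   }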
+ type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + ServiceStatus: + description: | + The status of the service's tasks. Provided only when requested as + part of a ServiceList operation. + type: "object" + properties: + RunningTasks: + description: | + The number of tasks for the service currently in the Running state. + type: "integer" + format: "uint64" + example: 7 + DesiredTasks: + description: | + The number of tasks for the service desired to be running. + For replicated services, this is the replica count from the + service spec. For global services, this is computed by taking + count of all tasks for the service with a Desired State other + than Shutdown. + type: "integer" + format: "uint64" + example: 10 + CompletedTasks: + description: | + The number of tasks for a job that are in the Completed state. + This field must be cross-referenced with the service type, as the + value of 0 may mean the service is not in a job mode, or it may + mean the job-mode service has no tasks yet Completed. + type: "integer" + format: "uint64" + JobStatus: + description: | + The status of the service when it is in one of ReplicatedJob or + GlobalJob modes. Absent on Replicated and Global mode services. The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job. Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. 
+ type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + x-go-name: "DeleteResponse" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceCreateResponse: + type: "object" + description: | + contains the information returned to a client on the + creation of a new service. + properties: + ID: + description: "The ID of the created service." + type: "string" + x-nullable: false + example: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warnings: + description: | + Optional warning message. + + FIXME(thaJeztah): this should have "omitempty" in the generated type. + type: "array" + x-nullable: true + items: + type: "string" + example: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warnings: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerInspectResponse: + type: "object" + title: "ContainerInspectResponse" + x-go-name: "InspectResponse" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Created: + description: |- + Date and time at which the container was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2025-02-17T17:43:39.64001363Z" + Path: + description: |- + The path to the command being run + type: "string" + example: "/bin/sh" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + example: + - "-c" + - "exit 9" + State: + $ref: "#/definitions/ContainerState" + Image: + description: |- + The ID (digest) of the image that this container was created from. 
+ type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ResolvConfPath: + description: |- + Location of the `/etc/resolv.conf` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" + HostnamePath: + description: |- + Location of the `/etc/hostname` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" + HostsPath: + description: |- + Location of the `/etc/hosts` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" + LogPath: + description: |- + Location of the file used to buffer the container's logs. Depending on + the logging-driver used for the container, this field may be omitted. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + x-nullable: true + example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" + Name: + description: |- + The name associated with this container. + + For historic reasons, the name may be prefixed with a forward-slash (`/`). + type: "string" + example: "/funny_chatelet" + RestartCount: + description: |- + Number of times the container was restarted since it was created, + or since daemon was started. + type: "integer" + example: 0 + Driver: + description: |- + The storage-driver used for the container's filesystem (graph-driver + or snapshotter). + type: "string" + example: "overlayfs" + Platform: + description: |- + The platform (operating system) for which the container was created. + + This field was introduced for the experimental "LCOW" (Linux Containers + On Windows) features, which has been removed. In most cases, this field + is equal to the host's operating system (`linux` or `windows`). + type: "string" + example: "linux" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + description: |- + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + MountLabel: + description: |- + SELinux mount label set for the container. + type: "string" + example: "" + ProcessLabel: + description: |- + SELinux process label set for the container. + type: "string" + example: "" + AppArmorProfile: + description: |- + The AppArmor profile set for the container. + type: "string" + example: "" + ExecIDs: + description: |- + IDs of exec instances that are running in the container. 
+ type: "array"
+ items:
+ type: "string"
+ x-nullable: true
+ example:
+ - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca"
+ - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4"
+ HostConfig:
+ $ref: "#/definitions/HostConfig"
+ GraphDriver:
+ $ref: "#/definitions/DriverData"
+ x-nullable: true
+ Storage:
+ $ref: "#/definitions/Storage"
+ x-nullable: true
+ SizeRw:
+ description: |-
+ The size of files that have been created or changed by this container.
+
+ This field is omitted by default, and only set when size is requested
+ in the API request.
+ type: "integer"
+ format: "int64"
+ x-nullable: true
+ example: "122880"
+ SizeRootFs:
+ description: |-
+ The total size of all files in the read-only layers from the image
+ that the container uses. These layers can be shared between containers.
+
+ This field is omitted by default, and only set when size is requested
+ in the API request.
+ type: "integer"
+ format: "int64"
+ x-nullable: true
+ example: "1653948416"
+ Mounts:
+ description: |-
+ List of mounts used by the container.
+ type: "array"
+ items:
+ $ref: "#/definitions/MountPoint"
+ Config:
+ $ref: "#/definitions/ContainerConfig"
+ NetworkSettings:
+ $ref: "#/definitions/NetworkSettings"
+
+ ContainerSummary:
+ type: "object"
+ properties:
+ Id:
+ description: |-
+ The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes).
+ type: "string"
+ x-go-name: "ID"
+ minLength: 64
+ maxLength: 64
+ pattern: "^[0-9a-fA-F]{64}$"
+ example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf"
+ Names:
+ description: |-
+ The names associated with this container. Most containers have a single
+ name, but when using legacy "links", the container can have multiple
+ names.
+
+ For historic reasons, names are prefixed with a forward-slash (`/`).
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "/funny_chatelet"
+ Image:
+ description: |-
+ The name or ID of the image used to create the container.
+
+ This field shows the image reference as was specified when creating the container,
+ which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest`
+ or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`),
+ short form (e.g., `ubuntu:latest`), or the ID(-prefix) of the image (e.g., `72297848456d`).
+
+ The content of this field can be updated at runtime if the image used to
+ create the container is untagged, in which case the field is updated to
+ contain the image ID (digest) it was resolved to in its canonical,
+ non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`).
+ type: "string"
+ example: "docker.io/library/ubuntu:latest"
+ ImageID:
+ description: |-
+ The ID (digest) of the image that this container was created from.
+ type: "string"
+ example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782"
+ ImageManifestDescriptor:
+ $ref: "#/definitions/OCIDescriptor"
+ x-nullable: true
+ description: |
+ OCI descriptor of the platform-specific manifest of the image
+ the container was created from.
+
+ Note: Only available if the daemon provides a multi-platform
+ image store.
+
+ This field is not populated in the `GET /system/df` endpoint.
+ Command:
+ description: "Command to run when starting the container"
+ type: "string"
+ example: "/bin/bash"
+ Created:
+ description: |-
+ Date and time at which the container was created as a Unix timestamp
+ (number of seconds since EPOCH).
+ type: "integer"
+ format: "int64"
+ example: "1739811096"
+ Ports:
+ description: |-
+ Port-mappings for the container.
+ type: "array"
+ items:
+ $ref: "#/definitions/PortSummary"
+ SizeRw:
+ description: |-
+ The size of files that have been created or changed by this container.
+
+ This field is omitted by default, and only set when size is requested
+ in the API request.
+ type: "integer"
+ format: "int64"
+ x-nullable: true
+ example: "122880"
+ SizeRootFs:
+ description: |-
+ The total size of all files in the read-only layers from the image
+ that the container uses. These layers can be shared between containers.
+
+ This field is omitted by default, and only set when size is requested
+ in the API request.
+ type: "integer"
+ format: "int64"
+ x-nullable: true
+ example: "1653948416"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.vendor: "Acme"
+ com.example.license: "GPL"
+ com.example.version: "1.0"
+ State:
+ description: |
+ The state of this container.
+ type: "string"
+ enum:
+ - "created"
+ - "running"
+ - "paused"
+ - "restarting"
+ - "exited"
+ - "removing"
+ - "dead"
+ example: "running"
+ Status:
+ description: |-
+ Additional human-readable status of this container (e.g. `Exit 0`)
+ type: "string"
+ example: "Up 4 days"
+ HostConfig:
+ type: "object"
+ description: |-
+ Summary of host-specific runtime information of the container. This
+ is a reduced set of information in the container's "HostConfig" as
+ available in the container "inspect" response.
+ properties:
+ NetworkMode:
+ description: |-
+ Networking mode (`host`, `none`, `container:<id>`) or name of the
+ primary network the container is using.
+
+ This field is primarily for backward compatibility. The container
+ can be connected to multiple networks for which information can be
+ found in the `NetworkSettings.Networks` field, which enumerates
+ settings per network.
+ type: "string"
+ example: "mynetwork"
+ Annotations:
+ description: |-
+ Arbitrary key-value metadata attached to the container.
+ type: "object"
+ x-nullable: true
+ additionalProperties:
+ type: "string"
+ example:
+ io.kubernetes.docker.type: "container"
+ io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3"
+ NetworkSettings:
+ description: |-
+ Summary of the container's network settings
+ type: "object"
+ properties:
+ Networks:
+ type: "object"
+ description: |-
+ Summary of network-settings for each network the container is
+ attached to.
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+ Mounts:
+ type: "array"
+ description: |-
+ List of mounts used by the container.
+ items:
+ $ref: "#/definitions/MountPoint"
+ Health:
+ type: "object"
+ description: |-
+ Summary of health status.
+
+ Added in v1.52; before that version, container summaries did not
+ include Health.
+ Since this attribute was introduced, it is also included for
+ containers with no health check configured and for containers that
+ are not running, in which case the status is `none`.
+ properties:
+ Status:
+ type: "string"
+ description: |-
+ the health status of the container
+ enum:
+ - "none"
+ - "starting"
+ - "healthy"
+ - "unhealthy"
+ example: "healthy"
+ FailingStreak:
+ description: "FailingStreak is the number of consecutive failures"
+ type: "integer"
+ example: 0
+
+ ContainersDiskUsage:
+ type: "object"
+ x-go-name: "DiskUsage"
+ x-go-package: "github.com/moby/moby/api/types/container"
+ description: |
+ represents system data usage information for container resources.
+ properties:
+ ActiveCount:
+ description: |
+ Count of active containers.
+ type: "integer"
+ format: "int64"
+ example: 1
+ TotalCount:
+ description: |
+ Count of all containers.
+ type: "integer"
+ format: "int64"
+ example: 4
+ Reclaimable:
+ description: |
+ Disk space that can be reclaimed by removing inactive containers.
+ type: "integer"
+ format: "int64"
+ example: 12345678
+ TotalSize:
+ description: |
+ Disk space in use by containers.
+ type: "integer"
+ format: "int64"
+ example: 98765432
+ Items:
+ description: |
+ List of container summaries.
+ type: "array"
+ x-omitempty: true
+ items:
+ x-go-type:
+ type: Summary
+
+ Driver:
+ description: "Driver represents a driver (network, logging, secrets)."
+ type: "object"
+ required: [Name]
+ properties:
+ Name:
+ description: "Name of the driver."
+ type: "string"
+ x-nullable: false
+ example: "some-driver"
+ Options:
+ description: "Key/value map of driver-specific options."
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ example:
+ OptionA: "value for driver-specific option A"
+ OptionB: "value for driver-specific option B"
+
+ SecretSpec:
+ type: "object"
+ properties:
+ Name:
+ description: "User-defined name of the secret."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Data:
+ description: |
+ Data is the data to store as a secret, formatted as a standard base64-encoded
+ ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string.
+ It must be empty if the Driver field is set, in which case the data is
+ loaded from an external secret store. The maximum allowed size is 500KB,
+ as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize).
+
+ This field is only used to _create_ a secret, and is not returned by
+ other endpoints.
+ type: "string"
+ example: ""
+ Driver:
+ description: |
+ Name of the secrets driver used to fetch the secret's value from an
+ external secret store.
+ $ref: "#/definitions/Driver"
+ Templating:
+ description: |
+ Templating driver, if applicable.
+
+ Templating controls whether and how to evaluate the secret payload as
+ a template. If no driver is set, no templating is used.
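+ # Illustrative example (not part of the spec): a minimal `POST /secrets/create`
+ # body using the SecretSpec fields above. The Data value is the base64 encoding
+ # of the string "hello" and is purely a placeholder:
+ #
+ #   { "Name": "app-secret", "Data": "aGVsbG8=" }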
+ $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether a process within this container has been killed because it ran + out of memory since the container was last started. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." 
+ type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerUpdateResponse: + type: "object" + title: "ContainerUpdateResponse" + x-go-name: "UpdateResponse" + description: |- + Response for a successful container-update. + properties: + Warnings: + type: "array" + description: |- + Warnings encountered when updating the container. + items: + type: "string" + example: ["Published ports are discarded when using host network mode"] + + ContainerStatsResponse: + description: | + Statistics sample for a container. + type: "object" + x-go-name: "StatsResponse" + title: "ContainerStatsResponse" + properties: + id: + description: | + ID of the container for which the stats were collected. + type: "string" + x-nullable: true + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + name: + description: | + Name of the container for which the stats were collected. + type: "string" + x-nullable: true + example: "boring_wozniak" + os_type: + description: | + OSType is the OS of the container ("linux" or "windows") to allow + platform-specific handling of stats. + type: "string" + x-nullable: true + example: "linux" + read: + description: | + Date and time at which this sample was collected. + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:22.165243637Z" + cpu_stats: + $ref: "#/definitions/ContainerCPUStats" + memory_stats: + $ref: "#/definitions/ContainerMemoryStats" + networks: + description: | + Network statistics for the container per interface. + + This field is omitted if the container has no networking enabled. + x-nullable: true + additionalProperties: + $ref: "#/definitions/ContainerNetworkStats" + example: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + pids_stats: + $ref: "#/definitions/ContainerPidsStats" + blkio_stats: + $ref: "#/definitions/ContainerBlkioStats" + num_procs: + description: | + The number of processors on the system. + + This field is Windows-specific and always zero for Linux containers. + type: "integer" + format: "uint32" + example: 16 + storage_stats: + $ref: "#/definitions/ContainerStorageStats" + preread: + description: | + Date and time at which this first sample was collected. This field + is not propagated if the "one-shot" option is set. If the "one-shot" + option is set, this field may be omitted, empty, or set to a default + date (`0001-01-01T00:00:00Z`). + + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. 
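+ # Illustrative note (not part of the spec): on Linux, clients commonly derive
+ # a CPU percentage from two consecutive samples (precpu_stats vs. cpu_stats),
+ # along the lines of:
+ #
+ #   cpu_delta    = cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage
+ #   system_delta = cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage
+ #   cpu_percent  = (cpu_delta / system_delta) * online_cpus * 100.0
+ #
+ # This is a common client-side convention, not part of the API contract.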
+ type: "string" + format: "date-time" + example: "2025-01-16T13:55:21.160452595Z" + precpu_stats: + $ref: "#/definitions/ContainerCPUStats" + + ContainerBlkioStats: + description: | + BlkioStats stores all IO service stats for data read and write. + + This type is Linux-specific and holds many fields that are specific to cgroups v1. + On a cgroup v2 host, all fields other than `io_service_bytes_recursive` + are omitted or `null`. + + This type is only populated on Linux and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStats" + x-nullable: true + properties: + io_service_bytes_recursive: + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_serviced_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_queue_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_service_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_wait_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_merged_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + sectors_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + example: + io_service_bytes_recursive: [ + {"major": 254, "minor": 0, "op": "read", "value": 7593984}, + {"major": 254, "minor": 0, "op": "write", "value": 100} + ] + io_serviced_recursive: null + io_queue_recursive: null + io_service_time_recursive: null + io_wait_time_recursive: null + io_merged_recursive: null + io_time_recursive: null + sectors_recursive: null + + ContainerBlkioStatEntry: + description: | + Blkio stats entry. + + This type is Linux-specific and omitted for Windows containers. 
+ type: "object" + x-go-name: "BlkioStatEntry" + x-nullable: true + properties: + major: + type: "integer" + format: "uint64" + example: 254 + minor: + type: "integer" + format: "uint64" + example: 0 + op: + type: "string" + example: "read" + value: + type: "integer" + format: "uint64" + example: 7593984 + + ContainerCPUStats: + description: | + CPU related info of the container + type: "object" + x-go-name: "CPUStats" + x-nullable: true + properties: + cpu_usage: + $ref: "#/definitions/ContainerCPUUsage" + system_cpu_usage: + description: | + System Usage. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + online_cpus: + description: | + Number of online CPUs. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint32" + x-nullable: true + example: 5 + throttling_data: + $ref: "#/definitions/ContainerThrottlingData" + + ContainerCPUUsage: + description: | + All CPU stats aggregated since container inception. + type: "object" + x-go-name: "CPUUsage" + x-nullable: true + properties: + total_usage: + description: | + Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). + type: "integer" + format: "uint64" + example: 29912000 + percpu_usage: + description: | + Total CPU time (in nanoseconds) consumed per core (Linux). + + This field is Linux-specific when using cgroups v1. It is omitted + when using cgroups v2 and Windows containers. + type: "array" + x-nullable: true + items: + type: "integer" + format: "uint64" + example: 29912000 + + usage_in_kernelmode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 21994000 + usage_in_usermode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 7918000 + + ContainerPidsStats: + description: | + PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "PidsStats" + x-nullable: true + properties: + current: + description: | + Current is the number of PIDs in the cgroup. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + limit: + description: | + Limit is the hard limit on the number of pids in the cgroup. + A "Limit" of 0 means that there is no limit. + type: "integer" + format: "uint64" + x-nullable: true + example: "18446744073709551615" + + ContainerThrottlingData: + description: | + CPU throttling stats of the container. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "ThrottlingData" + x-nullable: true + properties: + periods: + description: | + Number of periods with throttling active. + type: "integer" + format: "uint64" + example: 0 + throttled_periods: + description: | + Number of periods when the container hit its throttling limit. + type: "integer" + format: "uint64" + example: 0 + throttled_time: + description: | + Aggregated time (in nanoseconds) the container was throttled for. 
+ type: "integer" + format: "uint64" + example: 0 + + ContainerMemoryStats: + description: | + Aggregates all memory stats since container inception on Linux. + Windows returns stats for commit and private working set only. + type: "object" + x-go-name: "MemoryStats" + properties: + usage: + description: | + Current `res_counter` usage for memory. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + max_usage: + description: | + Maximum usage ever recorded. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + stats: + description: | + All the stats exported via memory.stat. + + The fields in this object differ between cgroups v1 and v2. + On cgroups v1, fields such as `cache`, `rss`, `mapped_file` are available. + On cgroups v2, fields such as `file`, `anon`, `inactive_file` are available. + + This field is Linux-specific and omitted for Windows containers. + type: "object" + additionalProperties: + type: "integer" + format: "uint64" + x-nullable: true + example: + { + "active_anon": 1572864, + "active_file": 5115904, + "anon": 1572864, + "anon_thp": 0, + "file": 7626752, + "file_dirty": 0, + "file_mapped": 2723840, + "file_writeback": 0, + "inactive_anon": 0, + "inactive_file": 2510848, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 2042, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 45, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 1180928, + "slab_reclaimable": 725576, + "slab_unreclaimable": 455352, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 1, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + } + failcnt: + description: | + Number of times memory usage hits limits. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + limit: + description: | + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 8217579520 + commitbytes: + description: | + Committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + commitpeakbytes: + description: | + Peak committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + privateworkingset: + description: | + Private working set. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + + ContainerNetworkStats: + description: | + Aggregates the network stats of one container + type: "object" + x-go-name: "NetworkStats" + x-nullable: true + properties: + rx_bytes: + description: | + Bytes received. Windows and Linux. + type: "integer" + format: "uint64" + example: 5338 + rx_packets: + description: | + Packets received. Windows and Linux. + type: "integer" + format: "uint64" + example: 36 + rx_errors: + description: | + Received errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. 
+ type: "integer" + format: "uint64" + example: 0 + rx_dropped: + description: | + Incoming packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + tx_bytes: + description: | + Bytes sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 1200 + tx_packets: + description: | + Packets sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 12 + tx_errors: + description: | + Sent errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + tx_dropped: + description: | + Outgoing packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + endpoint_id: + description: | + Endpoint ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + instance_id: + description: | + Instance ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + + ContainerStorageStats: + description: | + StorageStats is the disk I/O stats for read/write on Windows. + + This type is Windows-specific and omitted for Linux containers. + type: "object" + x-go-name: "StorageStats" + x-nullable: true + properties: + read_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + read_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + + ContainerTopResponse: + type: "object" + x-go-name: "TopResponse" + title: "ContainerTopResponse" + description: |- + Container "top" response. + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + example: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + description: |- + Each process running in the container, where each process + is an array of values corresponding to the titles. 
+ type: "array" + items: + type: "array" + items: + type: "string" + example: + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "27.0.1" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "27.0.1" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.47" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.24" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.22.7" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "6.8.0-31-generic" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. 
+ + <p><br /></p> + + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + + <p><br /></p> + + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. 
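+ # Illustrative usage, a sketch rather than part of the schema: these SystemInfo
+ # fields are returned by the `GET /info` endpoint, for example over the daemon's
+ # local Unix socket (the API version prefix shown here is an example only):
+ #   curl --unix-socket /var/run/docker.sock http://localhost/v1.47/info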
+ type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. + type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information obtained from `uname`. On Windows this + information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd> + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "6.8.0-31-generic" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 24.04 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Ubuntu 24.04 LTS" + OSVersion: + description: | + Version of the host's operating system + + <p><br /></p> + + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "24.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. + + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. 
+ Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + + <p><br /></p> + + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + type: "string" + example: "27.0.1" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shutdown + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. 
+
+ If no isolation value is specified on daemon start, on a Windows client,
+ the default is `hyperv`, and on Windows server, the default is `process`.
+
+ This option is currently not used on other platforms.
+ default: "default"
+ type: "string"
+ enum:
+ - "default"
+ - "hyperv"
+ - "process"
+ - ""
+ InitBinary:
+ description: |
+ Name and, optionally, path of the `docker-init` binary.
+
+ If the path is omitted, the daemon searches the host's `$PATH` for the
+ binary and uses the first result.
+ type: "string"
+ example: "docker-init"
+ ContainerdCommit:
+ $ref: "#/definitions/Commit"
+ RuncCommit:
+ $ref: "#/definitions/Commit"
+ InitCommit:
+ $ref: "#/definitions/Commit"
+ SecurityOptions:
+ description: |
+ List of security features that are enabled on the daemon, such as
+ apparmor, seccomp, SELinux, user-namespaces (userns), rootless and
+ no-new-privileges.
+
+ Additional configuration options for each security feature may
+ be present, and are included as a comma-separated list of key/value
+ pairs.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "name=apparmor"
+ - "name=seccomp,profile=default"
+ - "name=selinux"
+ - "name=userns"
+ - "name=rootless"
+ ProductLicense:
+ description: |
+ Reports a summary of the product license on the daemon.
+
+ If a commercial license has been applied to the daemon, information
+ such as number of nodes, and expiration are included.
+ type: "string"
+ example: "Community Engine"
+ DefaultAddressPools:
+ description: |
+ List of custom default address pools for local networks, which can be
+ specified in the daemon.json file or dockerd option.
+
+ Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256
+ 10.10.[0-255].0/24 address pools.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Base:
+ description: "The network address in CIDR format"
+ type: "string"
+ example: "10.10.0.0/16"
+ Size:
+ description: "The network pool size"
+ type: "integer"
+ example: 24
+ FirewallBackend:
+ $ref: "#/definitions/FirewallInfo"
+ DiscoveredDevices:
+ description: |
+ List of devices discovered by device drivers.
+
+ Each device includes information about its source driver, kind, name,
+ and additional driver-specific attributes.
+ type: "array"
+ items:
+ $ref: "#/definitions/DeviceInfo"
+ Warnings:
+ description: |
+ List of warnings / informational messages about missing features, or
+ issues related to the daemon configuration.
+
+ These messages can be printed by the client as information to the user.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "WARNING: No memory limit support"
+ CDISpecDirs:
+ description: |
+ List of directories where (Container Device Interface) CDI
+ specifications are located.
+
+ These specifications define vendor-specific modifications to an OCI
+ runtime specification for a container being created.
+
+ An empty list indicates that CDI device injection is disabled.
+
+ Note that using CDI device injection requires the daemon to have
+ experimental features enabled. For non-experimental daemons, an empty
+ list is always returned.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "/etc/cdi"
+ - "/var/run/cdi"
+ Containerd:
+ $ref: "#/definitions/ContainerdInfo"
+
+ ContainerdInfo:
+ description: |
+ Information for connecting to the containerd instance that is used by the daemon.
+ This is included for debugging purposes only.
+ type: "object"
+ x-nullable: true
+ properties:
+ Address:
+ description: "The address of the containerd socket."
+ type: "string" + example: "/run/containerd/containerd.sock" + Namespaces: + description: | + The namespaces that the daemon uses for running containers and + plugins in containerd. These namespaces can be configured in the + daemon configuration, and are considered to be used exclusively + by the daemon, Tampering with the containerd instance may cause + unexpected behavior. + + As these namespaces are considered to be exclusively accessed + by the daemon, it is not recommended to change these values, + or to change them to a value that is used by other systems, + such as cri-containerd. + type: "object" + properties: + Containers: + description: | + The default containerd namespace used for containers managed + by the daemon. + + The default namespace for containers is "moby", but will be + suffixed with the `<uid>.<gid>` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "moby" + example: "moby" + Plugins: + description: | + The default containerd namespace used for plugins managed by + the daemon. + + The default namespace for plugins is "plugins.moby", but will be + suffixed with the `<uid>.<gid>` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "plugins.moby" + example: "plugins.moby" + + FirewallInfo: + description: | + Information about the daemon's firewalling configuration. + + This field is currently only used on Linux, and omitted on other platforms. + type: "object" + x-nullable: true + properties: + Driver: + description: | + The name of the firewall backend driver. + type: "string" + example: "nftables" + Info: + description: | + Information about the firewall backend, provided as + "label" / "value" pairs. + + <p><br /></p> + + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["ReloadedAt", "2025-01-01T00:00:00Z"] + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + + <p><br /></p> + + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. 
+ type: "object" + x-nullable: true + properties: + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as + insecure. All other registries are secure. Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration override this behavior, insecure communication with + registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`. + + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs. + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is part of the list of insecure + registries. + + If `false`, the registry is insecure. Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. + type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. 
+ + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + status: + description: | + Information specific to the runtime. + + While this API specification does not define data provided by runtimes, + the following well-known properties may be provided by runtimes: + + `org.opencontainers.runtime-spec.features`: features structure as defined + in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), + in a JSON string representation. + + <p><br /></p> + + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + + SwarmInfo: + description: | + Represents generic information about swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of ID's and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." 
+ type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. + type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.oci.image.manifest.v1+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 424 + urls: + description: |- + List of URLs from which this object MAY be downloaded. + type: "array" + items: + type: "string" + format: "uri" + x-nullable: true + annotations: + description: |- + Arbitrary metadata relating to the targeted content. 
+ type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "com.docker.official-images.bashbrew.arch": "amd64" + "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" + "org.opencontainers.image.base.name": "scratch" + "org.opencontainers.image.created": "2025-01-27T00:00:00Z" + "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" + "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" + "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" + "org.opencontainers.image.version": "24.04" + data: + type: string + x-nullable: true + description: |- + Data is an embedding of the targeted content. This is encoded as a base64 + string when marshalled to JSON (automatically, by encoding/json). If + present, Data can be used directly to avoid fetching the targeted content. + example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. + type: "string" + x-nullable: true + example: null + + OCIPlatform: + type: "object" + x-go-name: Platform + x-nullable: true + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. 
A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. 
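+ # Illustrative note, not part of the schema: a minimal AccessMode for a volume
+ # mounted read/write on a single node at a time might look like:
+ #   AccessMode: { "Scope": "single", "Sharing": "none", "MountVolume": {} }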
+ default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + + ImageManifestSummary: + x-go-name: "ManifestSummary" + description: | + ImageManifestSummary represents a summary of an image manifest. + type: "object" + required: ["ID", "Descriptor", "Available", "Size", "Kind"] + properties: + ID: + description: | + ID is the content-addressable ID of an image and is the same as the + digest of the image manifest. 
+ type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Available: + description: Indicates whether all the child content (image config, layers) is fully available locally. + type: "boolean" + example: true + Size: + type: "object" + x-nullable: false + required: ["Content", "Total"] + properties: + Total: + type: "integer" + format: "int64" + example: 8213251 + description: | + Total is the total size (in bytes) of all the locally present + data (both distributable and non-distributable) that's related to + this manifest and its children. + This equal to the sum of [Content] size AND all the sizes in the + [Size] struct present in the Kind-specific data struct. + For example, for an image kind (Kind == "image") + this would include the size of the image content and unpacked + image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Content: + description: | + Content is the size (in bytes) of all the locally present + content in the content store (e.g. image config, layers) + referenced by this manifest and its children. + This only includes blobs in the content store. + type: "integer" + format: "int64" + example: 3987495 + Kind: + type: "string" + example: "image" + enum: + - "image" + - "attestation" + - "unknown" + description: | + The kind of the manifest. + + kind | description + -------------|----------------------------------------------------------- + image | Image manifest that can be used to start a container. + attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. + ImageData: + description: | + The image data for the image manifest. + This field is only populated when Kind is "image". + type: "object" + x-nullable: true + x-omitempty: true + required: ["Platform", "Containers", "Size", "UnpackedSize"] + properties: + Platform: + $ref: "#/definitions/OCIPlatform" + description: | + OCI platform of the image. This will be the platform specified in the + manifest descriptor from the index/manifest list. + If it's not available, it will be obtained from the image config. + Containers: + description: | + The IDs of the containers that are using this image. + type: "array" + items: + type: "string" + example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] + Size: + type: "object" + x-nullable: false + required: ["Unpacked"] + properties: + Unpacked: + type: "integer" + format: "int64" + example: 3987495 + description: | + Unpacked is the size (in bytes) of the locally unpacked + (uncompressed) image content that's directly usable by the containers + running this image. + It's independent of the distributable content - e.g. + the image might still have an unpacked data that's still used by + some container even when the distributable/compressed content is + already gone. + AttestationData: + description: | + The image data for the attestation manifest. + This field is only populated when Kind is "attestation". + type: "object" + x-nullable: true + x-omitempty: true + required: ["For"] + properties: + For: + description: | + The digest of the image manifest that this attestation is for. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. 
For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated . + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers. + + Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. 
+ However, if no platform
+ is passed and the given image does exist in the local image cache,
+ but its OS or architecture does not match, the container is created
+ with the available image, and a warning is added to the `Warnings`
+ field in the response, for example:
+
+ WARNING: The requested image's platform (linux/arm64/v8) does not
+ match the detected host platform (linux/amd64) and no
+ specific platform was requested
+
+ type: "string"
+ default: ""
+ - name: "body"
+ in: "body"
+ description: "Container to create"
+ schema:
+ allOf:
+ - $ref: "#/definitions/ContainerConfig"
+ - type: "object"
+ properties:
+ HostConfig:
+ $ref: "#/definitions/HostConfig"
+ NetworkingConfig:
+ $ref: "#/definitions/NetworkingConfig"
+ example:
+ Hostname: ""
+ Domainname: ""
+ User: ""
+ AttachStdin: false
+ AttachStdout: true
+ AttachStderr: true
+ Tty: false
+ OpenStdin: false
+ StdinOnce: false
+ Env:
+ - "FOO=bar"
+ - "BAZ=quux"
+ Cmd:
+ - "date"
+ Entrypoint: ""
+ Image: "ubuntu"
+ Labels:
+ com.example.vendor: "Acme"
+ com.example.license: "GPL"
+ com.example.version: "1.0"
+ Volumes:
+ /volumes/data: {}
+ WorkingDir: ""
+ NetworkDisabled: false
+ ExposedPorts:
+ 22/tcp: {}
+ StopSignal: "SIGTERM"
+ StopTimeout: 10
+ HostConfig:
+ Binds:
+ - "/tmp:/tmp"
+ Links:
+ - "redis3:redis"
+ Memory: 0
+ MemorySwap: 0
+ MemoryReservation: 0
+ NanoCpus: 500000
+ CpuPercent: 80
+ CpuShares: 512
+ CpuPeriod: 100000
+ CpuRealtimePeriod: 1000000
+ CpuRealtimeRuntime: 10000
+ CpuQuota: 50000
+ CpusetCpus: "0,1"
+ CpusetMems: "0,1"
+ MaximumIOps: 0
+ MaximumIOBps: 0
+ BlkioWeight: 300
+ BlkioWeightDevice:
+ - {}
+ BlkioDeviceReadBps:
+ - {}
+ BlkioDeviceReadIOps:
+ - {}
+ BlkioDeviceWriteBps:
+ - {}
+ BlkioDeviceWriteIOps:
+ - {}
+ DeviceRequests:
+ - Driver: "nvidia"
+ Count: -1
+ DeviceIDs: ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]
+ Capabilities: [["gpu", "nvidia", "compute"]]
+ Options:
+ property1: "string"
+ property2: "string"
+ MemorySwappiness: 60
+ OomKillDisable: false
+ OomScoreAdj: 500
+ PidMode: ""
+ PidsLimit: 0
+ PortBindings:
+ 22/tcp:
+ - HostPort: "11022"
+ PublishAllPorts: false
+ Privileged: false
+ ReadonlyRootfs: false
+ Dns:
+ - "8.8.8.8"
+ DnsOptions:
+ - ""
+ DnsSearch:
+ - ""
+ VolumesFrom:
+ - "parent"
+ - "other:ro"
+ CapAdd:
+ - "NET_ADMIN"
+ CapDrop:
+ - "MKNOD"
+ GroupAdd:
+ - "newgroup"
+ RestartPolicy:
+ Name: ""
+ MaximumRetryCount: 0
+ AutoRemove: true
+ NetworkMode: "bridge"
+ Devices: []
+ Ulimits:
+ - {}
+ LogConfig:
+ Type: "json-file"
+ Config: {}
+ SecurityOpt: []
+ StorageOpt: {}
+ CgroupParent: ""
+ VolumeDriver: ""
+ ShmSize: 67108864
+ NetworkingConfig:
+ EndpointsConfig:
+ isolated_nw:
+ IPAMConfig:
+ IPv4Address: "172.20.30.33"
+ IPv6Address: "2001:db8:abcd::3033"
+ LinkLocalIPs:
+ - "169.254.34.68"
+ - "fe80::3468"
+ Links:
+ - "container_1"
+ - "container_2"
+ Aliases:
+ - "server_x"
+ - "server_y"
+ database_nw: {}
+
+ required: true
+ responses:
+ 201:
+ description: "Container created successfully"
+ schema:
+ $ref: "#/definitions/ContainerCreateResponse"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: c2ada9df5af8"
+ 409:
+ description: "conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Container"]
+ /containers/{id}/json:
+ get:
+ summary: "Inspect a container"
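+ # Illustrative usage, a sketch rather than part of the specification: inspecting
+ # a container over the daemon's local Unix socket (the API version prefix is an
+ # example only):
+ #   curl --unix-socket /var/run/docker.sock "http://localhost/v1.47/containers/<id>/json"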
+ description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerInspectResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerTopResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. + schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + $ref: "#/definitions/FilesystemChange" + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
+ + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerStatsResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-<value>` where `<value>` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
+ type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: | + Send a POSIX signal to a container, defaulting to killing to the + container. + operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." 
+ schema: + $ref: "#/definitions/ContainerUpdateResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. 
+
+ See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/)
+ for more details.
+
+ ### Hijacking
+
+ This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`,
+ and `stderr` on the same socket.
+
+ This is the response from the daemon for an attach request:
+
+ ```
+ HTTP/1.1 200 OK
+ Content-Type: application/vnd.docker.raw-stream
+
+ [STREAM]
+ ```
+
+ After the headers and two newlines, the TCP connection can now be used
+ for raw, bidirectional communication between the client and server.
+
+ To hint potential proxies about connection hijacking, the Docker client
+ can also optionally send connection upgrade headers.
+
+ For example, the client sends this request to upgrade the connection:
+
+ ```
+ POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1
+ Upgrade: tcp
+ Connection: Upgrade
+ ```
+
+ The Docker daemon will respond with a `101 UPGRADED` response, and will
+ similarly follow with the raw stream:
+
+ ```
+ HTTP/1.1 101 UPGRADED
+ Content-Type: application/vnd.docker.raw-stream
+ Connection: Upgrade
+ Upgrade: tcp
+
+ [STREAM]
+ ```
+
+ ### Stream format
+
+ When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate),
+ the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream
+ and the stream over the hijacked connection is multiplexed to separate out
+ `stdout` and `stderr`. The stream consists of a series of frames, each
+ containing a header and a payload.
+
+ The header identifies which stream the frame belongs to (`stdout` or
+ `stderr`). It also contains the size of the associated frame encoded in
+ the last four bytes (`uint32`).
+
+ It is encoded on the first eight bytes like this:
+
+ ```go
+ header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+ ```
+
+ `STREAM_TYPE` can be:
+
+ - 0: `stdin` (is written on `stdout`)
+ - 1: `stdout`
+ - 2: `stderr`
+
+ `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size
+ encoded as big endian.
+
+ Following the header is the payload, which is the specified number of
+ bytes for that `STREAM_TYPE`.
+
+ The simplest way to implement this protocol is the following:
+
+ 1. Read 8 bytes.
+ 2. Choose `stdout` or `stderr` depending on the first byte.
+ 3. Extract the frame size from the last four bytes.
+ 4. Read the extracted size and output it on the correct output.
+ 5. Goto 1.
+
+ ### Stream format when using a TTY
+
+ When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate),
+ the stream is not multiplexed. The data exchanged over the hijacked
+ connection is simply the raw data from the process PTY and client's
+ `stdin`.
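+
+ As an illustrative sketch only (not part of the API surface), the
+ following Go snippet demultiplexes a non-TTY attach stream by following
+ the steps above. The `demux` helper, the package name, and the choice of
+ `os.Stdout`/`os.Stderr` as outputs are assumptions made for the example.
+
+ ```go
+ package attachdemo
+
+ import (
+ 	"encoding/binary"
+ 	"io"
+ 	"os"
+ )
+
+ // demux reads multiplexed frames from r (the hijacked connection after
+ // the response headers) and copies each payload to stdout or stderr
+ // according to the STREAM_TYPE byte in the 8-byte frame header.
+ func demux(r io.Reader) error {
+ 	var header [8]byte
+ 	for {
+ 		if _, err := io.ReadFull(r, header[:]); err != nil {
+ 			if err == io.EOF {
+ 				return nil // stream closed cleanly between frames
+ 			}
+ 			return err
+ 		}
+ 		// Frame size is a big-endian uint32 stored in bytes 4-7.
+ 		size := binary.BigEndian.Uint32(header[4:8])
+ 		dst := os.Stdout
+ 		if header[0] == 2 { // 2 == stderr
+ 			dst = os.Stderr
+ 		}
+ 		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
+ 			return err
+ 		}
+ 	}
+ }
+ ```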
+ + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards. + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." 
+ operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64 - encoded JSON object with some filesystem header information + about the path. + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64 - encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." 
+ operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. + type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: + + - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`<image-name>[:<tag>]`) + - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `until=<timestamp>` + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. 
If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. 
Supported + standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` + type: "string" + default: "" + - name: "version" + in: "query" + type: "string" + default: "1" + enum: ["1", "2"] + description: | + Version of the builder backend to use. + + - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) + - `2` is [BuildKit](https://github.com/moby/buildkit) + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "reserved-space" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "max-used-space" + in: "query" + description: "Maximum amount of disk space allowed to keep for cache" + type: "integer" + format: "int64" + - name: "min-free-space" + in: "query" + description: "Target amount of free disk space after pruning" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=<timestamp>` remove cache older than `<timestamp>`. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 
`10m`, `1h30m`) computed relative to the daemon's local time. + - `id=<id>` + - `parent=<id>` + - `type=<string>` + - `description=<string>` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Pull or import an image." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: | + Name of the image to pull. If the name includes a tag or digest, specific behavior applies: + + - If only `fromImage` includes a tag, that tag is used. + - If both `fromImage` and `tag` are provided, `tag` takes precedence. + - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. + - If neither a tag nor digest is specified, all tags are pulled. + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. 
+ If the given image does exist in the local image cache, but its OS or
+ architecture does not match, a warning is produced.
+
+ When used with the `fromSrc` option to import an image from an archive,
+ this option sets the platform information for the imported image. If
+ the option is not set, the host's native OS and Architecture are used
+ for the imported image.
+ type: "string"
+ default: ""
+ tags: ["Image"]
+ /images/{name}/json:
+ get:
+ summary: "Inspect an image"
+ description: "Return low-level information about an image."
+ operationId: "ImageInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/ImageInspect"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: someimage (tag: latest)"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID"
+ type: "string"
+ required: true
+ - name: "manifests"
+ in: "query"
+ description: |-
+ Include Manifests in the image summary.
+
+ The `manifests` and `platform` options are mutually exclusive, and
+ an error is produced if both are set.
+ type: "boolean"
+ default: false
+ required: false
+ - name: "platform"
+ type: "string"
+ in: "query"
+ description: |-
+ JSON-encoded OCI platform to select the platform-variant.
+ If omitted, it defaults to any locally available platform,
+ prioritizing the daemon's host platform.
+
+ If the daemon provides a multi-platform image store, this selects
+ the platform-variant to inspect. If the image is
+ a single-platform image, or if the multi-platform image does not
+ provide a variant matching the given platform, an error is returned.
+
+ The `platform` and `manifests` options are mutually exclusive, and
+ an error is produced if both are set.
+
+ Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
+ tags: ["Image"]
+ /images/{name}/history:
+ get:
+ summary: "Get the history of an image"
+ description: "Return parent layers of an image."
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + $ref: "#/definitions/ImageHistoryResponseItem" + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show the history for. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + Name of the image to push. For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. + type: "string" + required: true + - name: "tag" + in: "query" + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. 
+ + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + - name: "platforms" + in: "query" + description: | + Select platform-specific content to delete. + Multiple values are accepted. + Each platform is a OCI platform encoded as a JSON string. + type: "array" + items: + # This should be OCIPlatform + # but $ref is not supported for array in query in Swagger 2.0 + # $ref: "#/definitions/OCIPlatform" + type: "string" + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." 
+ operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + description: | + Whether this repository has automated builds enabled. + + <p><br /></p> + + > **Deprecated**: This field is deprecated and will always be "false". + type: "boolean" + example: false + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true + is_automated: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true + is_automated: false + name: "Busybox base image." + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true + is_automated: false + name: "postgres" + star_count: 12408 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-official=(true|false)` + - `stars=<number>` Matches images that has at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." 
+ schema: + $ref: "#/definitions/AuthResponse" + 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith <hannibal@a-team.com>`)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/x-ndjson" + - "application/json-seq" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=<string>` config name or ID + - `container=<string>` container name or ID + - `daemon=<string>` daemon name or ID + - `event=<string>` event type + - `image=<string>` image name or ID + - `label=<string>` image or container label + - `network=<string>` network name or ID + - `node=<string>` node ID + - `plugin`=<string> plugin name or ID + - `scope`=<string> local or swarm + - `secret=<string>` secret name or ID + - `service=<string>` service name or ID + - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=<string>` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + ImageUsage: + $ref: "#/definitions/ImagesDiskUsage" + ContainerUsage: + $ref: "#/definitions/ContainersDiskUsage" + VolumeUsage: + $ref: "#/definitions/VolumesDiskUsage" + BuildCacheUsage: + $ref: "#/definitions/BuildCacheDiskUsage" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + - name: "verbose" + in: "query" + description: | + Show detailed information on space usage. + type: "boolean" + default: false + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. 
+ + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). + + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "array" + items: + type: "string" + collectionFormat: "multi" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + - name: "platform" + type: "array" + items: + type: "string" + collectionFormat: "multi" + in: "query" + description: | + JSON encoded OCI platform(s) which will be used to select the + platform-specific image(s) to be saved if the image is + multi-platform. If not provided, the full multi-platform image + will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see the [export image endpoint](#operation/ImageGet). 
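+      # As a sketch, an image tarball exported with the endpoint above can be
+      # loaded back through this endpoint over the local Unix socket (image
+      # name and file name are placeholders; add a /v1.xx/ prefix to pin an
+      # API version):
+      #
+      #   curl --unix-socket /var/run/docker.sock \
+      #     "http://localhost/images/hello-world:latest/get" -o hello.tar
+      #   curl --unix-socket /var/run/docker.sock -X POST \
+      #     -H "Content-Type: application/x-tar" \
+      #     --data-binary @hello.tar \
+      #     "http://localhost/images/load?quiet=false"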
+ operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + - name: "platform" + type: "array" + items: + type: "string" + collectionFormat: "multi" + in: "query" + description: | + JSON encoded OCI platform(s) which will be used to select the + platform-specific image(s) to load if the image is + multi-platform. If not provided, the full multi-platform image + will be loaded. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + title: "ExecConfig" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-<value>` where `<value>` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container. 
+ example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + example: false + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: true + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
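+      # As a sketch of the exec lifecycle these endpoints describe: create the
+      # exec, start it, then inspect it (container name and exec ID are
+      # placeholders):
+      #
+      #   curl --unix-socket /var/run/docker.sock -X POST \
+      #     -H "Content-Type: application/json" \
+      #     -d '{"AttachStdout":true,"AttachStderr":true,"Cmd":["date"]}' \
+      #     http://localhost/containers/my-container/exec
+      #   curl --unix-socket /var/run/docker.sock -X POST \
+      #     -H "Content-Type: application/json" \
+      #     -d '{"Detach":false,"Tty":false}' \
+      #     http://localhost/exec/<exec-id>/start
+      #   curl --unix-socket /var/run/docker.sock \
+      #     http://localhost/exec/<exec-id>/json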
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + $ref: "#/definitions/VolumeListResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=<volume-driver-name>` Matches volumes based on their driver. + - `label=<key>` or `label=<key>:<value>` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=<volume-name>` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + $ref: "#/definitions/VolumeCreateRequest" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + put: + summary: | + "Update a volume. 
Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). 
+ + Note that it uses a different, smaller representation of a network than + inspecting a single network. For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/NetworkSummary" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv4: true + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=<driver-name>` Matches a network's driver. + - `id=<network-id>` Matches all or part of a network ID. + - `label=<key>` or `label=<key>=<value>` of a network label. + - `name=<network-name>` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
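+      # As a sketch, filters are passed as URL-encoded JSON in the query
+      # string; curl's -G with --data-urlencode handles the encoding:
+      #
+      #   curl -G --unix-socket /var/run/docker.sock \
+      #     --data-urlencode 'filters={"driver":["bridge"]}' \
+      #     http://localhost/networks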
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/NetworkInspect" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "Network created successfully" + schema: + $ref: "#/definitions/NetworkCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + example: "my_network" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + example: "bridge" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level). + type: "string" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + example: true + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + example: false + ConfigOnly: + description: | + Creates a config-only network. Config-only networks are placeholder + networks for network configurations to be used by other networks. + Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + example: false + ConfigFrom: + description: | + Specifies the source which will provide the configuration for + this network. 
The specified network must be an existing + config-only network; see ConfigOnly. + $ref: "#/definitions/ConfigReference" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv4: + description: "Enable IPv4 on the network." + type: "boolean" + example: true + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + example: true + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Operation forbidden" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + $ref: "#/definitions/NetworkConnectRequest" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + $ref: "#/definitions/NetworkDisconnectRequest" + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=<capability name>` + - `enable=<true>|<false>` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
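+      # As a sketch, query the plugin's privileges first, then grant them in
+      # the pull body; the plugin can then be enabled with
+      # POST /plugins/{name}/enable (plugin name and $privileges are
+      # placeholders):
+      #
+      #   privileges=$(curl -G --unix-socket /var/run/docker.sock \
+      #     --data-urlencode 'remote=vieux/sshfs:latest' \
+      #     http://localhost/plugins/privileges)
+      #   curl --unix-socket /var/run/docker.sock -X POST \
+      #     -H "Content-Type: application/json" -d "$privileges" \
+      #     "http://localhost/plugins/pull?remote=vieux/sshfs:latest"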
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=<node id>` + - `label=<engine label>` + - `membership=`(`accepted`|`pending`)` + - `name=<node name>` + - `node.label=<node label>` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes. 
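+      # As a sketch, read the node's current version index first, then send it
+      # back with the update (node ID, index, and the spec fields are
+      # placeholders; send the full NodeSpec you want the node to keep):
+      #
+      #   curl --unix-socket /var/run/docker.sock \
+      #     http://localhost/nodes/<node-id> | jq .Version.Index
+      #   curl --unix-socket /var/run/docker.sock -X POST \
+      #     -H "Content-Type: application/json" \
+      #     -d '{"Role":"worker","Availability":"drain"}' \
+      #     "http://localhost/nodes/<node-id>/update?version=<index>"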
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + `<ip|interface>`), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
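+      # As a sketch, initializing a swarm over the local Unix socket (the
+      # addresses are placeholders); the response body is the new node ID:
+      #
+      #   curl --unix-socket /var/run/docker.sock -X POST \
+      #     -H "Content-Type: application/json" \
+      #     -d '{"ListenAddr":"0.0.0.0:2377","AdvertiseAddr":"192.168.1.1:2377"}' \
+      #     http://localhost/swarm/init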
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + `<ip|interface>`), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathAddr: "192.168.1.1" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
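+      # As a sketch, forcing the node to leave the swarm:
+      #
+      #   curl --unix-socket /var/run/docker.sock -X POST \
+      #     "http://localhost/swarm/leave?force=true"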
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=<service id>` + - `label=<service label>` + - `mode=["replicated"|"global"]` + - `name=<service name>` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
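+      # As a sketch, listing replicated services together with their running
+      # and desired task counts:
+      #
+      #   curl -G --unix-socket /var/run/docker.sock \
+      #     --data-urlencode 'filters={"mode":["replicated"]}' \
+      #     "http://localhost/services?status=true"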
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/ServiceCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + OomScoreAdj: 0 + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
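+      # As a sketch, inspecting a service with defaults filled in (the service
+      # name is a placeholder):
+      #
+      #   curl --unix-socket /var/run/docker.sock \
+      #     "http://localhost/services/web?insertDefaults=true"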
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + OomScoreAdj: 0 + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
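+      # As a sketch, streaming logs for a service over the local Unix socket
+      # (the service name is a placeholder):
+      #
+      #   curl --no-buffer --unix-socket /var/run/docker.sock \
+      #     "http://localhost/services/web/logs?stdout=true&stderr=true&follow=true&tail=100"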
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: 
"60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=<task id>` + - `label=key` or `label="key=value"` + - `name=<task name>` + - `node=<node id or name>` + - `service=<service name>` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=<secret id>` + - `label=<key> or label=<key>=value` + - `name=<secret name>` + - `names=<secret name>` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. 
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=<config id>` + - `label=<key> or label=<key>=value` + - `name=<config name>` + - `names=<config name>` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 
404:
+ description: "no such config"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the config"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/ConfigSpec"
+ description: |
+ The spec of the config to update. Currently, only the Labels field
+ can be updated. All other fields must remain unchanged from the
+ [ConfigInspect endpoint](#operation/ConfigInspect) response values.
+ - name: "version"
+ in: "query"
+ description: |
+ The version number of the config object being updated. This is
+ required to avoid conflicting writes.
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Config"]
+ /distribution/{name}/json:
+ get:
+ summary: "Get image information from the registry"
+ description: |
+ Return image digest and platform information by contacting the registry.
+ operationId: "DistributionInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "descriptor and platform information"
+ schema:
+ $ref: "#/definitions/DistributionInspect"
+ 401:
+ description: "Failed authentication or no image found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: someimage (tag: latest)"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or id"
+ type: "string"
+ required: true
+ tags: ["Distribution"]
+ /session:
+ post:
+ summary: "Initialize interactive session"
+ description: |
+ Start a new interactive session with a server. Session allows server to
+ call back to the client for advanced capabilities.
+
+ ### Hijacking
+
+ This endpoint hijacks the HTTP connection to HTTP2 transport that allows
+ the client to expose gRPC services on that connection.
+
+ For example, the client sends this request to upgrade the connection:
+
+ ```
+ POST /session HTTP/1.1
+ Upgrade: h2c
+ Connection: Upgrade
+ ```
+
+ The Docker daemon responds with a `101 UPGRADED` response followed by
+ the raw stream:
+
+ ```
+ HTTP/1.1 101 UPGRADED
+ Connection: Upgrade
+ Upgrade: h2c
+ ```
+ operationId: "Session"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ responses:
+ 101:
+ description: "no error, hijacking successful"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Session"]
diff --git a/_vendor/github.com/moby/moby/api/docs/v1.53.yaml b/_vendor/github.com/moby/moby/api/docs/v1.53.yaml
new file mode 100644
index 00000000000..32cb96eab56
--- /dev/null
+++ b/_vendor/github.com/moby/moby/api/docs/v1.53.yaml
@@ -0,0 +1,13873 @@
+# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
+#
+# This is used for generating API documentation and the types used by the
+# client/server. See api/README.md for more information.
+#
+# Some style notes:
+# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
+# descriptions.
+# - There is no maximum line length, for ease of editing and pretty diffs.
+# - operationIds are in the format "NounVerb", with a singular noun.
+
+swagger: "2.0"
+schemes:
+ - "http"
+ - "https"
+produces:
+ - "application/json"
+ - "text/plain"
+consumes:
+ - "application/json"
+ - "text/plain"
+basePath: "/v1.53"
+info:
+ title: "Docker Engine API"
+ version: "1.53"
+ x-logo:
+ url: "https://docs.docker.com/assets/images/logo-docker-main.png"
+ description: |
+ The Engine API is an HTTP API served by Docker Engine. It is the API the
+ Docker client uses to communicate with the Engine, so everything the Docker
+ client can do can be done with the API.
+
+ Most of the client's commands map directly to API endpoints (e.g. `docker ps`
+ is `GET /containers/json`). The notable exception is running containers,
+ which consists of several API calls.
+
+ # Errors
+
+ The API uses standard HTTP status codes to indicate the success or failure
+ of the API call. The body of the response will be JSON in the following
+ format:
+
+ ```
+ {
+ "message": "page not found"
+ }
+ ```
+
+ # Versioning
+
+ The API is usually changed in each release, so API calls are versioned to
+ ensure that clients don't break. To lock to a specific version of the API,
+ you prefix the URL with its version, for example, call `/v1.30/info` to use
+ the v1.30 version of the `/info` endpoint. If the API version specified in
+ the URL is not supported by the daemon, an HTTP `400 Bad Request` error message
+ is returned.
+
+ If you omit the version-prefix, the current version of the API (v1.53) is used.
+ For example, calling `/info` is the same as calling `/v1.53/info`. Using the
+ API without a version-prefix is deprecated and will be removed in a future release.
+
+ Engine releases in the near future should support this version of the API,
+ so your client will continue to work even if it is talking to a newer Engine.
+
+ The API uses an open schema model, which means the server may add extra properties
+ to responses. Likewise, the server will ignore any extra query parameters and
+ request body properties. When you write clients, you need to ignore additional
+ properties in responses to ensure they do not break when talking to newer
+ daemons.
+
+
+ # Authentication
+
+ Authentication for registries is handled client side. The client has to send
+ authentication details to various endpoints that need to communicate with
+ registries, such as `POST /images/(name)/push`. These are sent as
+ `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5)
+ (JSON) string with the following structure:
+
+ ```
+ {
+ "username": "string",
+ "password": "string",
+ "serveraddress": "string"
+ }
+ ```
+
+ The `serveraddress` is a domain/IP without a protocol. Throughout this
+ structure, double quotes are required.
+
+ If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth),
+ you can just pass this instead of credentials:
+
+ ```
+ {
+ "identitytoken": "9cbaf023786cd7..."
+ }
+ ```
+
+# The tags on paths define the menu sections in the ReDoc documentation, so
+# the usage of tags must make sense for that:
+# - They should be singular, not plural.
+# - There should not be too many tags, or the menu becomes unwieldy. For
+# example, it is preferable to add a path to the "System" tag instead of
+# creating a tag with a single path in it.
+# - The order of tags in this list defines the order in the menu.
+tags:
+ # Primary objects
+ - name: "Container"
+ x-displayName: "Containers"
+ description: |
+ Create and manage containers.
+ - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + PortSummary: + type: "object" + description: | + Describes a port-mapping between the container and the host. + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + x-go-type: + type: Addr + import: + package: net/netip + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. 
+ - `cluster` a Swarm cluster volume. + - `image` an OCI image. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "image" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `image` an OCI image. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. + type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. + type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. + type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). + type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + description: | + The name of the device driver to use for this request. + + Note that if this is specified the capabilities are ignored when + selecting a device driver. + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + + Note that if a driver is specified the capabilities have no effect on + selecting a driver as the driver name is used directly. 
+ + Note that if no driver is specified the capabilities are used to + select a driver with the required capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `cluster` a Swarm cluster volume + - `image` Mounts an image. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + allOf: + - $ref: "#/definitions/MountType" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to `true` in conjunction). + + Added in v1.44, before that version all read-only mounts were + non-recursive by default. To match the previous behaviour this + will default to `true` for clients on versions prior to v1.44. + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + Subpath: + description: "Source path inside the volume. Must be relative without any back traversals." + type: "string" + example: "dir-inside-volume/subdirectory" + ImageOptions: + description: "Optional configuration for the `image` type." + type: "object" + properties: + Subpath: + description: "Source path inside the image. Must be relative without any back traversals." + type: "string" + example: "dir-inside-image/subdirectory" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). + type: "integer" + Options: + description: | + The options to be passed to the tmpfs mount. An array of arrays. + Flag options should be provided as 1-length arrays. Other types + should be provided as as 2-length arrays, where the first item is + the key and the second the value. + type: "array" + items: + type: "array" + minItems: 1 + maxItems: 2 + items: + type: "string" + example: + [["noexec"]] + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." 
+ type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. + type: "integer" + format: "int64" + MemorySwappiness: + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCpus: + description: "CPU quota in units of 10<sup>-9</sup> CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. 
+ type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. + type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. 
It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. 
+ + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + example: "" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + description: |- + Name of the logging driver used for the container or "none" + if logging is disabled. + type: "string" + enum: + - "local" + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + description: |- + Driver-specific configuration options for the logging driver. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "5" + "max-size": "10m" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:<name|id>`. Any + other value is taken as a custom network's name to which this + container should connect to. + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `<container name>[:<ro|rw>]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. 
+ x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + Annotations: + type: "object" + description: | + Arbitrary non-identifying metadata attached to container and + provided to the runtime when the container is started. + additionalProperties: + type: "string" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + format: "ip-address" + x-go-type: + type: Addr + import: + package: net/netip + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:<name|id>"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:<name|id>"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: |- + Gives the container full access to the host. + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. 
+ + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. + ShmSize: + type: "integer" + format: "int64" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + x-nullable: true + description: |- + A list of kernel parameters (sysctls) to set in the container. + + This field is omitted if not set. + additionalProperties: + type: "string" + example: + "net.ipv4.ip_forward": "1" + Runtime: + type: "string" + x-nullable: true + description: |- + Runtime to use with this container. + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`<user-name|UID>[<:group-name|GID>]`). + type: "string" + example: "123:456" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." 
+ type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + ImageConfig: + description: | + Configuration of the image. These fields are used as defaults + when starting a container from the image. + type: "object" + properties: + User: + description: "The user that commands are run as inside the container." 
+ type: "string" + example: "web:web" + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: + "/app/data": {} + "/app/config": {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. 
+ EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + SandboxKey: + description: SandboxKey is the full path of the netns handle + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + Ports: + $ref: "#/definitions/PortMap" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `<port>/<protocol>`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + x-nullable: true + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + x-go-type: + type: Addr + import: + package: net/netip + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" + + DriverData: + description: | + Information about the storage driver used to store the container's and + image's filesystem. + type: "object" + required: [Name, Data] + properties: + Name: + description: "Name of the storage driver." + type: "string" + x-nullable: false + example: "overlay2" + Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific, and depends on the storage-driver + in use, and should be used for informational purposes only. + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } + + Storage: + description: | + Information about the storage used by the container. + type: "object" + properties: + RootFS: + description: | + Information about the storage used for the container's root filesystem. 
+ type: "object" + x-nullable: true + $ref: "#/definitions/RootFSStorage" + + RootFSStorage: + description: | + Information about the storage used for the container's root filesystem. + type: "object" + x-go-name: RootFSStorage + properties: + Snapshot: + description: | + Information about the snapshot used for the container's root filesystem. + type: "object" + x-nullable: true + $ref: "#/definitions/RootFSStorageSnapshot" + + RootFSStorageSnapshot: + description: | + Information about a snapshot backend of the container's root filesystem. + type: "object" + x-go-name: RootFSStorageSnapshot + properties: + Name: + description: "Name of the snapshotter." + type: "string" + x-nullable: false + + FilesystemChange: + description: | + Change in the container's filesystem. + type: "object" + required: [Path, Kind] + properties: + Path: + description: | + Path to file or directory that has changed. + type: "string" + x-nullable: false + Kind: + $ref: "#/definitions/ChangeType" + + ChangeType: + description: | + Kind of change + + Can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + + ImageInspect: + description: | + Information about an image in the local image cache. + type: "object" + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + Manifests: + description: | + Manifests is a list of image manifests available in this image. It + provides a more detailed view of the platform-specific image manifests or + other image-attached data like build attestations. + + Only available if the daemon provides a multi-platform image store + and the `manifests` option is set in the inspect request. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: true + items: + $ref: "#/definitions/ImageManifestSummary" + Identity: + description: |- + Identity holds information about the identity and origin of the image. + This is trusted information verified by the daemon and cannot be modified + by tagging an image to a different name. + x-nullable: true + $ref: "#/definitions/Identity" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. 
+ type: "array" + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Comment: + description: | + Optional message that was set when committing or importing the image. + type: "string" + x-nullable: true + example: "" + Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if present in the image, + and omitted otherwise. + type: "string" + format: "dateTime" + x-nullable: true + example: "2022-02-04T21:20:12.497794809Z" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: true + example: "" + Config: + $ref: "#/definitions/ImageConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. + type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + GraphDriver: + x-nullable: true + $ref: "#/definitions/DriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + + Identity: + description: |- + Identity holds information about the identity and origin of the image. 
+      This is trusted information verified by the daemon and cannot be modified
+      by tagging an image to a different name.
+    type: "object"
+    properties:
+      Signature:
+        description: |-
+          Signature contains the properties of verified signatures for the image.
+        type: "array"
+        items:
+          $ref: "#/definitions/SignatureIdentity"
+      Pull:
+        description: |-
+          Pull contains remote location information if the image was created via pull.
+          If the image was pulled via a mirror, this contains the original repository location.
+          After a successful push, this also contains the pushed repository location.
+        type: "array"
+        items:
+          $ref: "#/definitions/PullIdentity"
+      Build:
+        description: |-
+          Build contains build reference information if the image was created via build.
+        type: "array"
+        items:
+          $ref: "#/definitions/BuildIdentity"
+
+  BuildIdentity:
+    description: |-
+      BuildIdentity contains build reference information if the image was created via build.
+    type: "object"
+    properties:
+      Ref:
+        description: |-
+          Ref is the identifier for the build request. This reference can be used to
+          look up the build details in the BuildKit history API.
+        type: "string"
+      CreatedAt:
+        description: |-
+          CreatedAt is the time when the build ran.
+        type: "string"
+        format: "date-time"
+
+  PullIdentity:
+    description: |-
+      PullIdentity contains remote location information if the image was created via pull.
+      If the image was pulled via a mirror, this contains the original repository location.
+    type: "object"
+    properties:
+      Repository:
+        description: |-
+          Repository is the remote repository location the image was pulled from.
+        type: "string"
+
+  SignatureIdentity:
+    description: |-
+      SignatureIdentity contains the properties of verified signatures for the image.
+    type: "object"
+    properties:
+      Name:
+        description: |-
+          Name is a textual description summarizing the type of signature.
+        type: "string"
+      Timestamps:
+        description: |-
+          Timestamps contains a list of verified signed timestamps for the signature.
+        type: "array"
+        items:
+          $ref: "#/definitions/SignatureTimestamp"
+      KnownSigner:
+        description: |-
+          KnownSigner is an identifier for a special signer identity that is known to the implementation.
+        $ref: "#/definitions/KnownSignerIdentity"
+      DockerReference:
+        description: |-
+          DockerReference is the Docker image reference associated with the signature.
+          This is an optional field only present in older hashedrecord signatures.
+        type: "string"
+      Signer:
+        description: |-
+          Signer contains information about the signer certificate used to sign the image.
+        $ref: "#/definitions/SignerIdentity"
+      SignatureType:
+        description: |-
+          SignatureType is the type of signature format. E.g. "bundle-v0.3" or "hashedrecord".
+        $ref: "#/definitions/SignatureType"
+      Error:
+        description: |-
+          Error contains error information if signature verification failed.
+          Other fields will be empty in this case.
+        type: "string"
+      Warnings:
+        description: |-
+          Warnings contains any warnings that occurred during signature verification.
+          For example, if there was no internet connectivity and cached trust roots were used.
+          Warnings do not indicate a failed verification, but may point to configuration issues.
+        type: "array"
+        items:
+          type: "string"
+
+  SignatureTimestamp:
+    description: |-
+      SignatureTimestamp contains information about a verified signed timestamp for an image signature.
+    type: "object"
+    properties:
+      Type:
+        $ref: "#/definitions/SignatureTimestampType"
+      URI:
+        type: "string"
+      Timestamp:
+        type: "string"
+        format: "date-time"
+
+  SignatureTimestampType:
+    description: |-
+      SignatureTimestampType is the type of timestamp used in the signature.
+    type: "string"
+    enum:
+      - "Tlog"
+      - "TimestampAuthority"
+
+  SignatureType:
+    description: |-
+      SignatureType is the type of signature format.
+    type: "string"
+    enum:
+      - "bundle-v0.3"
+      - "simplesigning-v1"
+
+  KnownSignerIdentity:
+    description: |-
+      KnownSignerIdentity is an identifier for a special signer identity that is known to the implementation.
+    type: "string"
+    enum:
+      - "DHI"
+
+  SignerIdentity:
+    description: |-
+      SignerIdentity contains information about the signer certificate used to sign the image.
+    type: "object"
+    properties:
+      CertificateIssuer:
+        type: "string"
+        description: |-
+          CertificateIssuer is the certificate issuer.
+      SubjectAlternativeName:
+        type: "string"
+        description: |-
+          SubjectAlternativeName is the certificate subject alternative name.
+      Issuer:
+        type: "string"
+        description: |-
+          The OIDC issuer. Should match the `iss` claim of the ID token or, in the
+          case of a federated login like Dex, it should match the issuer URL of
+          the upstream issuer. If the issuer is not set, the extensions are
+          invalid and will fail to render.
+      BuildSignerURI:
+        type: "string"
+        description: |-
+          Reference to specific build instructions that are responsible for signing.
+      BuildSignerDigest:
+        type: "string"
+        description: |-
+          Immutable reference to the specific version of the build instructions that is responsible for signing.
+      RunnerEnvironment:
+        type: "string"
+        description: |-
+          Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure.
+      SourceRepositoryURI:
+        type: "string"
+        description: |-
+          Source repository URL that the build was based on.
+      SourceRepositoryDigest:
+        type: "string"
+        description: |-
+          Immutable reference to a specific version of the source code that the build was based upon.
+      SourceRepositoryRef:
+        type: "string"
+        description: |-
+          Source repository ref that the build run was based upon.
+      SourceRepositoryIdentifier:
+        type: "string"
+        description: |-
+          Immutable identifier for the source repository the workflow was based upon.
+      SourceRepositoryOwnerURI:
+        type: "string"
+        description: |-
+          URL of the owner of the source repository that the build was based on.
+      SourceRepositoryOwnerIdentifier:
+        type: "string"
+        description: |-
+          Immutable identifier for the owner of the source repository that the workflow was based upon.
+      BuildConfigURI:
+        type: "string"
+        description: |-
+          Build config URL to the top-level/initiating build instructions.
+      BuildConfigDigest:
+        type: "string"
+        description: |-
+          Immutable reference to the specific version of the top-level/initiating build instructions.
+      BuildTrigger:
+        type: "string"
+        description: |-
+          Event or action that initiated the build.
+      RunInvocationURI:
+        type: "string"
+        description: |-
+          Run invocation URL that uniquely identifies the build execution.
+      SourceRepositoryVisibilityAtSigning:
+        type: "string"
+        description: |-
+          Source repository visibility at the time of signing the certificate.
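+
+  # Illustrative usage note (not part of the upstream definitions): the
+  # Identity / SignerIdentity data above is returned as part of an image
+  # inspect response. Assuming a local daemon listening on the default Unix
+  # socket and a hypothetical image named "example:1.0", the verified
+  # signature details could be read with:
+  #
+  #   curl --unix-socket /var/run/docker.sock \
+  #     "http://localhost/images/example:1.0/json" \
+  #     | jq '.Identity.Signature[]? | {Name, SignatureType, Error}'
+  #
+  # Daemons that do not populate image identity simply omit the Identity
+  # field, in which case the jq filter prints nothing instead of failing.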
+ + ImageSummary: + type: "object" + x-go-name: "Summary" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Created: + description: | + Date and time at which the image was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + x-nullable: false + example: "1644009612" + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 172064416 + SharedSize: + description: | + Total size of image layers that are shared between this image and other + images. + + This size is not calculated by default. `-1` indicates that the value + has not been set / calculated. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + Labels: + description: "User-defined key/value metadata." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Containers: + description: | + Number of containers using this image. Includes both stopped and running + containers. + + `-1` indicates that the value has not been set / calculated. + x-nullable: false + type: "integer" + example: 2 + Manifests: + description: | + Manifests is a list of manifests available in this image. + It provides a more detailed view of the platform-specific image manifests + or other image-attached data like build attestations. 
+ + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: false + x-omitempty: true + items: + $ref: "#/definitions/ImageManifestSummary" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + + ImagesDiskUsage: + type: "object" + x-go-name: "DiskUsage" + x-go-package: "github.com/moby/moby/api/types/image" + description: | + represents system data usage for image resources. + properties: + ActiveCount: + description: | + Count of active images. + type: "integer" + format: "int64" + example: 1 + TotalCount: + description: | + Count of all images. + type: "integer" + format: "int64" + example: 4 + Reclaimable: + description: | + Disk space that can be reclaimed by removing unused images. + type: "integer" + format: "int64" + example: 12345678 + TotalSize: + description: | + Disk space in use by images. + type: "integer" + format: "int64" + example: 98765432 + Items: + description: | + List of image summaries. + type: "array" + x-omitempty: true + items: + x-go-type: + type: Summary + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + AuthResponse: + description: | + An identity token was generated successfully. + type: "object" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + example: "Login Succeeded" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + example: "9cbaf023786cd7..." + x-nullable: false + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + x-nullable: false + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + example: "tardis" + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + example: "custom" + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + example: "/var/lib/docker/volumes/tardis" + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." 
+ x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumesDiskUsage: + type: "object" + x-go-name: "DiskUsage" + x-go-package: "github.com/moby/moby/api/types/volume" + description: | + represents system data usage for volume resources. + properties: + ActiveCount: + description: | + Count of active volumes. + type: "integer" + format: "int64" + example: 1 + TotalCount: + description: | + Count of all volumes. + type: "integer" + format: "int64" + example: 4 + Reclaimable: + description: | + Disk space that can be reclaimed by removing inactive volumes. + type: "integer" + format: "int64" + example: 12345678 + TotalSize: + description: | + Disk space in use by volumes. + type: "integer" + format: "int64" + example: 98765432 + Items: + description: | + List of volumes. + type: "array" + x-omitempty: true + items: + x-go-type: + type: Volume + + VolumeCreateRequest: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateRequest" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + example: [] + + Network: + type: "object" + properties: + Name: + description: | + Name of the network. + type: "string" + example: "my_network" + x-omitempty: false + Id: + description: | + ID that uniquely identifies a network on a single machine. + type: "string" + x-go-name: "ID" + x-omitempty: false + example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: + description: | + Date and time at which the network was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-omitempty: false + x-go-type: + type: Time + import: + package: time + hints: + nullable: false + example: "2016-10-19T04:33:30.360899459Z" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level) + type: "string" + x-omitempty: false + example: "local" + Driver: + description: | + The name of the driver used to create the network (e.g. `bridge`, + `overlay`). + type: "string" + x-omitempty: false + example: "overlay" + EnableIPv4: + description: | + Whether the network was created with IPv4 enabled. + type: "boolean" + x-omitempty: false + example: true + EnableIPv6: + description: | + Whether the network was created with IPv6 enabled. + type: "boolean" + x-omitempty: false + example: false + IPAM: + description: | + The network's IP Address Management. + $ref: "#/definitions/IPAM" + x-nullable: false + x-omitempty: false + Internal: + description: | + Whether the network is created to only allow internal networking + connectivity. + type: "boolean" + x-nullable: false + x-omitempty: false + default: false + example: false + Attachable: + description: | + Whether a global / swarm scope network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + x-nullable: false + x-omitempty: false + default: false + example: false + Ingress: + description: | + Whether the network is providing the routing-mesh for the swarm cluster. + type: "boolean" + x-nullable: false + x-omitempty: false + default: false + example: false + ConfigFrom: + $ref: "#/definitions/ConfigReference" + x-nullable: false + x-omitempty: false + ConfigOnly: + description: | + Whether the network is a config-only network. Config-only networks are + placeholder networks for network configurations to be used by other + networks. Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + x-omitempty: false + x-nullable: false + default: false + Options: + description: | + Network-specific options uses when creating the network. 
+ type: "object" + x-omitempty: false + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: | + Metadata specific to the network being created. + type: "object" + x-omitempty: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Peers: + description: | + List of peer nodes for an overlay network. This field is only present + for overlay networks, and omitted for other network types. + type: "array" + x-omitempty: true + items: + $ref: "#/definitions/PeerInfo" + + NetworkSummary: + description: "Network list response item" + x-go-name: Summary + type: "object" + allOf: + - $ref: "#/definitions/Network" + + NetworkInspect: + description: 'The body of the "get network" http response message.' + x-go-name: Inspect + type: "object" + allOf: + - $ref: "#/definitions/Network" + properties: + Containers: + description: | + Contains endpoints attached to the network. + type: "object" + x-omitempty: false + additionalProperties: + $ref: "#/definitions/EndpointResource" + example: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Services: + description: | + List of services using the network. This field is only present for + swarm scope networks, and omitted for local scope networks. + type: "object" + x-omitempty: true + additionalProperties: + x-go-type: + type: ServiceInfo + hints: + nullable: false + Status: + description: > + provides runtime information about the network + such as the number of allocated IPs. + $ref: "#/definitions/NetworkStatus" + + NetworkStatus: + description: > + provides runtime information about the network + such as the number of allocated IPs. + type: "object" + x-go-name: Status + properties: + IPAM: + $ref: "#/definitions/IPAMStatus" + + ServiceInfo: + x-nullable: false + x-omitempty: false + description: > + represents service parameters with the list of service's tasks + type: "object" + properties: + VIP: + type: "string" + x-omitempty: false + x-go-type: + type: Addr + import: + package: net/netip + Ports: + type: "array" + x-omitempty: false + items: + type: "string" + LocalLBIndex: + type: "integer" + format: "int" + x-omitempty: false + x-go-type: + type: int + Tasks: + type: "array" + x-omitempty: false + items: + $ref: "#/definitions/NetworkTaskInfo" + + NetworkTaskInfo: + x-nullable: false + x-omitempty: false + x-go-name: Task + description: > + carries the information about one backend task + type: "object" + properties: + Name: + type: "string" + x-omitempty: false + EndpointID: + type: "string" + x-omitempty: false + EndpointIP: + type: "string" + x-omitempty: false + x-go-type: + type: Addr + import: + package: net/netip + Info: + type: "object" + x-omitempty: false + additionalProperties: + type: "string" + + ConfigReference: + x-nullable: false + x-omitempty: false + description: | + The config-only network source to provide the configuration for + this network. 
+ type: "object" + properties: + Network: + description: | + The name of the config-only network that provides the network's + configuration. The specified network must be an existing config-only + network. Only network names are allowed, not network IDs. + type: "string" + x-omitempty: false + example: "config_only_network_01" + + IPAM: + type: "object" + x-nullable: false + x-omitempty: false + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + example: "default" + Config: + description: | + List of IPAM configuration options, specified as a map: + + ``` + {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} + ``` + type: "array" + items: + $ref: "#/definitions/IPAMConfig" + Options: + description: "Driver-specific options, specified as a map." + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + example: "172.20.0.0/16" + IPRange: + type: "string" + example: "172.20.10.0/24" + Gateway: + type: "string" + example: "172.20.10.11" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + + IPAMStatus: + type: "object" + x-nullable: false + x-omitempty: false + properties: + Subnets: + type: "object" + additionalProperties: + $ref: "#/definitions/SubnetStatus" + example: + "172.16.0.0/16": + IPsInUse: 3 + DynamicIPsAvailable: 65533 + "2001:db8:abcd:0012::0/96": + IPsInUse: 5 + DynamicIPsAvailable: 4294967291 + x-go-type: + type: SubnetStatuses + kind: map + + SubnetStatus: + type: "object" + x-nullable: false + x-omitempty: false + properties: + IPsInUse: + description: > + Number of IP addresses in the subnet that are in use or reserved and + are therefore unavailable for allocation, saturating at 2<sup>64</sup> - 1. + type: integer + format: uint64 + x-omitempty: false + DynamicIPsAvailable: + description: > + Number of IP addresses within the network's IPRange for the subnet + that are available for allocation, saturating at 2<sup>64</sup> - 1. + type: integer + format: uint64 + x-omitempty: false + + EndpointResource: + type: "object" + description: > + contains network resources allocated and used for a + container in a network. + properties: + Name: + type: "string" + x-omitempty: false + example: "container_1" + EndpointID: + type: "string" + x-omitempty: false + example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: + type: "string" + x-omitempty: false + example: "02:42:ac:13:00:02" + x-go-type: + type: HardwareAddr + IPv4Address: + type: "string" + x-omitempty: false + example: "172.19.0.2/16" + x-go-type: + type: Prefix + import: + package: net/netip + IPv6Address: + type: "string" + x-omitempty: false + example: "" + x-go-type: + type: Prefix + import: + package: net/netip + + PeerInfo: + description: > + represents one peer of an overlay network. + type: "object" + x-nullable: false + properties: + Name: + description: + ID of the peer-node in the Swarm cluster. + type: "string" + x-omitempty: false + example: "6869d7c1732b" + IP: + description: + IP-address of the peer-node in the Swarm cluster. 
+ type: "string" + x-omitempty: false + example: "10.133.77.91" + x-go-type: + type: Addr + import: + package: net/netip + + NetworkCreateResponse: + description: "OK response to NetworkCreate operation" + type: "object" + title: "NetworkCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warning] + properties: + Id: + description: "The ID of the created network." + type: "string" + x-nullable: false + example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" + Warning: + description: "Warnings encountered when creating the container" + type: "string" + x-nullable: false + example: "" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + description: | + BuildCache contains information about a build cache record. + properties: + ID: + type: "string" + description: | + Unique ID of the build cache record. + example: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] + Type: + type: "string" + description: | + Cache record type. + example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + BuildCacheDiskUsage: + type: "object" + x-go-name: "DiskUsage" + x-go-package: "github.com/moby/moby/api/types/build" + description: | + represents system data usage for build cache resources. + properties: + ActiveCount: + description: | + Count of active build cache records. + type: "integer" + format: "int64" + example: 1 + TotalCount: + description: | + Count of all build cache records. + type: "integer" + format: "int64" + example: 4 + Reclaimable: + description: | + Disk space that can be reclaimed by removing inactive build cache records. + type: "integer" + format: "int64" + example: 12345678 + TotalSize: + description: | + Disk space in use by build cache records. + type: "integer" + format: "int64" + example: 98765432 + Items: + description: | + List of build cache records. 
+ type: "array" + x-omitempty: true + items: + x-go-type: + type: CacheRecord + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + DeviceInfo: + type: "object" + description: | + DeviceInfo represents a device that can be used by a container. + properties: + Source: + type: "string" + example: "cdi" + description: | + The origin device driver. + ID: + type: "string" + example: "vendor.com/gpu=0" + description: | + The unique identifier for the device within its source driver. + For CDI devices, this would be an FQDN like "vendor.com/gpu=0". + + NRIInfo: + description: | + Information about the Node Resource Interface (NRI). + + This field is only present if NRI is enabled. + type: "object" + x-nullable: true + properties: + Info: + description: | + Information about NRI, provided as "label" / "value" pairs. + + <p><br /></p> + + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["plugin-path", "/opt/docker/nri/plugins"] + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IDResponse: + description: "Response to an API call that returns just an Id" + type: "object" + x-go-name: "IDResponse" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + NetworkConnectRequest: + description: | + NetworkConnectRequest represents the data to be used to connect a container to a network. + type: "object" + x-go-name: "ConnectRequest" + required: ["Container"] + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + x-nullable: false + example: "3613f73ba0e4" + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + x-nullable: true + + NetworkDisconnectRequest: + description: | + NetworkDisconnectRequest represents the data to be used to disconnect a container from a network. + type: "object" + x-go-name: "DisconnectRequest" + required: ["Container"] + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + x-nullable: false + example: "3613f73ba0e4" + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + default: false + x-nullable: false + x-omitempty: false + example: false + + EndpointSettings: + description: "Configuration for a network endpoint." 
+ type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + MacAddress: + description: | + MAC address for the endpoint on this network. The network driver might ignore this parameter. + type: "string" + example: "02:42:ac:11:00:04" + x-go-type: + type: HardwareAddr + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + GwPriority: + description: | + This property determines which endpoint will provide the default + gateway for a container. The endpoint with the highest priority will + be used. If multiple endpoints have the same priority, endpoints are + lexicographically sorted based on their network name, and the one + that sorts first is picked. + type: "integer" + format: "int64" + example: + - 10 + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + x-go-type: + type: Addr + import: + package: net/netip + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + x-go-type: + type: Addr + import: + package: net/netip + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + x-go-type: + type: Addr + import: + package: net/netip + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + DNSNames: + description: | + List of all DNS names an endpoint has on a specific network. This + list is based on the container name, network aliases, container short + ID, and hostname. + + These DNS names are non-fully qualified but can contain several dots. + You can get fully qualified DNS names by appending `.<network-name>`. + For instance, if container name is `my.ctr` and the network is named + `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be + `my.ctr.testnet`. + type: array + items: + type: string + example: ["foobar", "server_x", "server_y", "my.ctr"] + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. 
+ type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + x-go-type: + type: Addr + import: + package: net/netip + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + x-go-type: + type: Addr + import: + package: net/netip + LinkLocalIPs: + type: "array" + items: + type: "string" + x-go-type: + type: Addr + import: + package: net/netip + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-go-name: "Mount" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + x-go-name: "Device" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-go-name: "Env" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. + type: "object" + x-go-name: "Privilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + + Plugin: + description: "A plugin for the Engine API" + type: "object" + x-go-name: "Plugin" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "user-configurable settings for the plugin." + type: "object" + x-go-name: "Settings" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-go-name: "PluginReference" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." 
+ type: "object" + x-go-name: "Config" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + x-go-name: "Interface" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + type: "string" + x-go-type: + type: "CapabilityID" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." + enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-go-name: "User" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-go-name: "NetworkConfig" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-go-name: "LinuxConfig" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-go-name: "Args" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + x-go-name: "RootFS" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. 
+ + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." 
+ type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. + type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." 
+ type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. + type: "string" + SigningCAKey: + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. 
+ type: "string" + ForceRotate: + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey` + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: | + If set, generate a key and use it to lock data stored on the + managers. + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selected log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. 
+ type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + + <p><br /></p> + + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + + <p><br /></p> + + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + + <p><br /></p> + + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. 
+ + <p><br /></p> + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + + <p><br /></p> + + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." 
+                      type: "string"
+                    GID:
+                      description: "GID represents the file GID."
+                      type: "string"
+                    Mode:
+                      description: "Mode represents the FileMode of the file."
+                      type: "integer"
+                      format: "uint32"
+                SecretID:
+                  description: |
+                    SecretID represents the ID of the specific secret that we're
+                    referencing.
+                  type: "string"
+                SecretName:
+                  description: |
+                    SecretName is the name of the secret that this references,
+                    but this is just provided for lookup/display purposes. The
+                    secret in the reference will be identified by its ID.
+                  type: "string"
+          OomScoreAdj:
+            type: "integer"
+            format: "int64"
+            description: |
+              An integer value containing the score given to the container in
+              order to tune OOM killer preferences.
+            example: 0
+          Configs:
+            description: |
+              Configs contains references to zero or more configs that will be
+              exposed to the service.
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                File:
+                  description: |
+                    File represents a specific target that is backed by a file.
+
+                    <p><br /><p>
+
+                    > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive
+                  type: "object"
+                  properties:
+                    Name:
+                      description: |
+                        Name represents the final filename in the filesystem.
+                      type: "string"
+                    UID:
+                      description: "UID represents the file UID."
+                      type: "string"
+                    GID:
+                      description: "GID represents the file GID."
+                      type: "string"
+                    Mode:
+                      description: "Mode represents the FileMode of the file."
+                      type: "integer"
+                      format: "uint32"
+                Runtime:
+                  description: |
+                    Runtime represents a target that is not mounted into the
+                    container but is used by the task
+
+                    <p><br /><p>
+
+                    > **Note**: `Configs.File` and `Configs.Runtime` are mutually
+                    > exclusive
+                  type: "object"
+                ConfigID:
+                  description: |
+                    ConfigID represents the ID of the specific config that we're
+                    referencing.
+                  type: "string"
+                ConfigName:
+                  description: |
+                    ConfigName is the name of the config that this references,
+                    but this is just provided for lookup/display purposes. The
+                    config in the reference will be identified by its ID.
+                  type: "string"
+          Isolation:
+            type: "string"
+            description: |
+              Isolation technology of the containers running the service.
+              (Windows only)
+            enum:
+              - "default"
+              - "process"
+              - "hyperv"
+              - ""
+          Init:
+            description: |
+              Run an init inside the container that forwards signals and reaps
+              processes. This field is omitted if empty, and the default (as
+              configured on the daemon) is used.
+            type: "boolean"
+            x-nullable: true
+          Sysctls:
+            description: |
+              Set kernel namespaced parameters (sysctls) in the container.
+              The Sysctls option on services accepts the same sysctls as are
+              supported on containers. Note that while the same sysctls are
+              supported, no guarantees or checks are made about their
+              suitability for a clustered environment, and it's up to the user
+              to determine whether a given sysctl will work properly in a
+              Service.
+            type: "object"
+            additionalProperties:
+              type: "string"
+          # This option is not used by Windows containers
+          CapabilityAdd:
+            type: "array"
+            description: |
+              A list of kernel capabilities to add to the default set
+              for the container.
+            items:
+              type: "string"
+            example:
+              - "CAP_NET_RAW"
+              - "CAP_SYS_ADMIN"
+              - "CAP_SYS_CHROOT"
+              - "CAP_SYSLOG"
+          CapabilityDrop:
+            type: "array"
+            description: |
+              A list of kernel capabilities to drop from the default set
+              for the container.
+            items:
+              type: "string"
+            example:
+              - "CAP_NET_RAW"
+          Ulimits:
+            description: |
+              A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                Name:
+                  description: "Name of ulimit"
+                  type: "string"
+                Soft:
+                  description: "Soft limit"
+                  type: "integer"
+                Hard:
+                  description: "Hard limit"
+                  type: "integer"
+      NetworkAttachmentSpec:
+        description: |
+          Read-only spec type for non-swarm containers attached to swarm overlay
+          networks.
+
+          <p><br /></p>
+
+          > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+          > mutually exclusive. PluginSpec is only used when the Runtime field
+          > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+          > field is set to `attachment`.
+        type: "object"
+        properties:
+          ContainerID:
+            description: "ID of the container represented by this task"
+            type: "string"
+      Resources:
+        description: |
+          Resource requirements which apply to each individual container created
+          as part of the service.
+        type: "object"
+        properties:
+          Limits:
+            description: "Define resources limits."
+            $ref: "#/definitions/Limit"
+          Reservations:
+            description: "Define resources reservation."
+            $ref: "#/definitions/ResourceObject"
+          SwapBytes:
+            description: |
+              Amount of swap in bytes - can only be used together with a memory limit.
+              If not specified, the default behaviour is to grant a swap space twice
+              as big as the memory limit.
+              Set to -1 to enable unlimited swap.
+            type: "integer"
+            format: "int64"
+            minimum: -1
+            x-nullable: true
+            x-omitempty: true
+          MemorySwappiness:
+            description: |
+              Tune the service's containers' memory swappiness (0 to 100).
+              If not specified, defaults to the containers' OS' default, generally 60,
+              or whatever value was predefined in the image.
+              Set to -1 to unset a previously set value.
+            type: "integer"
+            format: "int64"
+            minimum: -1
+            maximum: 100
+            x-nullable: true
+            x-omitempty: true
+      RestartPolicy:
+        description: |
+          Specification for the restart policy which applies to containers
+          created as part of this service.
+        type: "object"
+        properties:
+          Condition:
+            description: "Condition for restart."
+            type: "string"
+            enum:
+              - "none"
+              - "on-failure"
+              - "any"
+          Delay:
+            description: "Delay between restart attempts."
+            type: "integer"
+            format: "int64"
+          MaxAttempts:
+            description: |
+              Maximum attempts to restart a given container before giving up
+              (default value is 0, which is ignored).
+            type: "integer"
+            format: "int64"
+            default: 0
+          Window:
+            description: |
+              Window is the time window used to evaluate the restart policy
+              (default value is 0, which is unbounded).
+            type: "integer"
+            format: "int64"
+            default: 0
+      Placement:
+        type: "object"
+        properties:
+          Constraints:
+            description: |
+              An array of constraint expressions to limit the set of nodes where
+              a task can be scheduled. Constraint expressions can either use a
+              _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
+              nodes that satisfy every expression (AND match). Constraints can
+              match node or Docker Engine labels as follows:
+
+              node attribute       | matches                        | example
+              ---------------------|--------------------------------|-----------------------------------------------
+              `node.id`            | Node ID                        | `node.id==2ivku8v2gvtg4`
+              `node.hostname`      | Node hostname                  | `node.hostname!=node-2`
+              `node.role`          | Node role (`manager`/`worker`) | `node.role==manager`
+              `node.platform.os`   | Node operating system          | `node.platform.os==windows`
+              `node.platform.arch` | Node architecture              | `node.platform.arch==x86_64`
+              `node.labels`        | User-defined node labels       | `node.labels.security==high`
+              `engine.labels`      | Docker Engine's labels         | `engine.labels.operatingsystem==ubuntu-24.04`
+
+              `engine.labels` apply to Docker Engine labels like operating system,
+              drivers, etc. Swarm administrators add `node.labels` for operational
+              purposes by using the [`node update endpoint`](#operation/NodeUpdate).
+
+            type: "array"
+            items:
+              type: "string"
+            example:
+              - "node.hostname!=node3.corp.example.com"
+              - "node.role!=manager"
+              - "node.labels.type==production"
+              - "node.platform.os==linux"
+              - "node.platform.arch==x86_64"
+          Preferences:
+            description: |
+              Preferences provide a way to make the scheduler aware of factors
+              such as topology. They are provided in order from highest to
+              lowest precedence.
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                Spread:
+                  type: "object"
+                  properties:
+                    SpreadDescriptor:
+                      description: |
+                        label descriptor, such as `engine.labels.az`.
+                      type: "string"
+            example:
+              - Spread:
+                  SpreadDescriptor: "node.labels.datacenter"
+              - Spread:
+                  SpreadDescriptor: "node.labels.rack"
+          MaxReplicas:
+            description: |
+              Maximum number of replicas per node (default value is 0, which
+              is unlimited)
+            type: "integer"
+            format: "int64"
+            default: 0
+          Platforms:
+            description: |
+              Platforms stores all the platforms that the service's image can
+              run on. This field is used in the platform filter for scheduling.
+              If empty, then the platform filter is off, meaning there are no
+              scheduling restrictions.
+            type: "array"
+            items:
+              $ref: "#/definitions/Platform"
+      ForceUpdate:
+        description: |
+          A counter that triggers an update even if no relevant parameters have
+          been changed.
+        type: "integer"
+        format: "uint64"
+      Runtime:
+        description: |
+          Runtime is the type of runtime specified for the task executor.
+        type: "string"
+      Networks:
+        description: "Specifies which networks the service should attach to."
+        type: "array"
+        items:
+          $ref: "#/definitions/NetworkAttachmentConfig"
+      LogDriver:
+        description: |
+          Specifies the log driver to use for tasks created from this spec. If
+          not present, the default one for the swarm will be used, finally
+          falling back to the engine default if not specified.
+        type: "object"
+        properties:
+          Name:
+            type: "string"
+          Options:
+            type: "object"
+            additionalProperties:
+              type: "string"
+
+  TaskState:
+    type: "string"
+    enum:
+      - "new"
+      - "allocated"
+      - "pending"
+      - "assigned"
+      - "accepted"
+      - "preparing"
+      - "ready"
+      - "starting"
+      - "running"
+      - "complete"
+      - "shutdown"
+      - "failed"
+      - "rejected"
+      - "remove"
+      - "orphaned"
+
+  ContainerStatus:
+    type: "object"
+    description: "represents the status of a container."
+ properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + $ref: "#/definitions/ContainerStatus" + PortStatus: + $ref: "#/definitions/PortStatus" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + $ref: "#/definitions/TaskStatus" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service. + $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." 
+        type: "object"
+        additionalProperties:
+          type: "string"
+      TaskTemplate:
+        $ref: "#/definitions/TaskSpec"
+      Mode:
+        description: "Scheduling mode for the service."
+        type: "object"
+        properties:
+          Replicated:
+            type: "object"
+            properties:
+              Replicas:
+                type: "integer"
+                format: "int64"
+          Global:
+            type: "object"
+          ReplicatedJob:
+            description: |
+              The mode used for services with a finite number of tasks that run
+              to a completed state.
+            type: "object"
+            properties:
+              MaxConcurrent:
+                description: |
+                  The maximum number of replicas to run simultaneously.
+                type: "integer"
+                format: "int64"
+                default: 1
+              TotalCompletions:
+                description: |
+                  The total number of replicas desired to reach the Completed
+                  state. If unset, will default to the value of `MaxConcurrent`
+                type: "integer"
+                format: "int64"
+          GlobalJob:
+            description: |
+              The mode used for services which run a task to the completed state
+              on each valid node.
+            type: "object"
+      UpdateConfig:
+        description: "Specification for the update strategy of the service."
+        type: "object"
+        properties:
+          Parallelism:
+            description: |
+              Maximum number of tasks to be updated in one iteration (0 means
+              unlimited parallelism).
+            type: "integer"
+            format: "int64"
+          Delay:
+            description: "Amount of time between updates, in nanoseconds."
+            type: "integer"
+            format: "int64"
+          FailureAction:
+            description: |
+              Action to take if an updated task fails to run, or stops running
+              during the update.
+            type: "string"
+            enum:
+              - "continue"
+              - "pause"
+              - "rollback"
+          Monitor:
+            description: |
+              Amount of time to monitor each updated task for failures, in
+              nanoseconds.
+            type: "integer"
+            format: "int64"
+          MaxFailureRatio:
+            description: |
+              The fraction of tasks that may fail during an update before the
+              failure action is invoked, specified as a floating point number
+              between 0 and 1.
+            type: "number"
+            default: 0
+          Order:
+            description: |
+              The order of operations when rolling out an updated task. Either
+              the old task is shut down before the new task is started, or the
+              new task is started before the old task is shut down.
+            type: "string"
+            enum:
+              - "stop-first"
+              - "start-first"
+      RollbackConfig:
+        description: "Specification for the rollback strategy of the service."
+        type: "object"
+        properties:
+          Parallelism:
+            description: |
+              Maximum number of tasks to be rolled back in one iteration (0 means
+              unlimited parallelism).
+            type: "integer"
+            format: "int64"
+          Delay:
+            description: |
+              Amount of time between rollback iterations, in nanoseconds.
+            type: "integer"
+            format: "int64"
+          FailureAction:
+            description: |
+              Action to take if a rolled back task fails to run, or stops
+              running during the rollback.
+            type: "string"
+            enum:
+              - "continue"
+              - "pause"
+          Monitor:
+            description: |
+              Amount of time to monitor each rolled back task for failures, in
+              nanoseconds.
+            type: "integer"
+            format: "int64"
+          MaxFailureRatio:
+            description: |
+              The fraction of tasks that may fail during a rollback before the
+              failure action is invoked, specified as a floating point number
+              between 0 and 1.
+            type: "number"
+            default: 0
+          Order:
+            description: |
+              The order of operations when rolling back a task. Either the old
+              task is shut down before the new task is started, or the new task
+              is started before the old task is shut down.
+            type: "string"
+            enum:
+              - "stop-first"
+              - "start-first"
+      Networks:
+        description: |
+          Specifies which networks the service should attach to.
+
+          Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead.
+        type: "array"
+        items:
+          $ref: "#/definitions/NetworkAttachmentConfig"
+
+      EndpointSpec:
+        $ref: "#/definitions/EndpointSpec"
+
+  EndpointPortConfig:
+    type: "object"
+    properties:
+      Name:
+        type: "string"
+      Protocol:
+        type: "string"
+        enum:
+          - "tcp"
+          - "udp"
+          - "sctp"
+      TargetPort:
+        description: "The port inside the container."
+        type: "integer"
+      PublishedPort:
+        description: "The port on the swarm hosts."
+        type: "integer"
+      PublishMode:
+        description: |
+          The mode in which the port is published.
+
+          <p><br /></p>
+
+          - "ingress" makes the target port accessible on every node,
+            regardless of whether there is a task for the service running on
+            that node or not.
+          - "host" bypasses the routing mesh and publishes the port directly on
+            the swarm node where that service is running.
+
+        type: "string"
+        enum:
+          - "ingress"
+          - "host"
+        default: "ingress"
+        example: "ingress"
+
+  EndpointSpec:
+    description: "Properties that can be configured to access and load balance a service."
+    type: "object"
+    properties:
+      Mode:
+        description: |
+          The mode of resolution to use for internal load balancing between tasks.
+        type: "string"
+        enum:
+          - "vip"
+          - "dnsrr"
+        default: "vip"
+      Ports:
+        description: |
+          List of exposed ports that this service is accessible on from the
+          outside. Ports can only be provided if `vip` resolution mode is used.
+        type: "array"
+        items:
+          $ref: "#/definitions/EndpointPortConfig"
+
+  Service:
+    type: "object"
+    properties:
+      ID:
+        type: "string"
+      Version:
+        $ref: "#/definitions/ObjectVersion"
+      CreatedAt:
+        type: "string"
+        format: "dateTime"
+      UpdatedAt:
+        type: "string"
+        format: "dateTime"
+      Spec:
+        $ref: "#/definitions/ServiceSpec"
+      Endpoint:
+        type: "object"
+        properties:
+          Spec:
+            $ref: "#/definitions/EndpointSpec"
+          Ports:
+            type: "array"
+            items:
+              $ref: "#/definitions/EndpointPortConfig"
+          VirtualIPs:
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                NetworkID:
+                  type: "string"
+                Addr:
+                  type: "string"
+      UpdateStatus:
+        description: "The status of a service update."
+        type: "object"
+        properties:
+          State:
+            type: "string"
+            enum:
+              - "updating"
+              - "paused"
+              - "completed"
+          StartedAt:
+            type: "string"
+            format: "dateTime"
+          CompletedAt:
+            type: "string"
+            format: "dateTime"
+          Message:
+            type: "string"
+      ServiceStatus:
+        description: |
+          The status of the service's tasks. Provided only when requested as
+          part of a ServiceList operation.
+        type: "object"
+        properties:
+          RunningTasks:
+            description: |
+              The number of tasks for the service currently in the Running state.
+            type: "integer"
+            format: "uint64"
+            example: 7
+          DesiredTasks:
+            description: |
+              The number of tasks for the service desired to be running.
+              For replicated services, this is the replica count from the
+              service spec. For global services, this is computed by taking
+              count of all tasks for the service with a Desired State other
+              than Shutdown.
+            type: "integer"
+            format: "uint64"
+            example: 10
+          CompletedTasks:
+            description: |
+              The number of tasks for a job that are in the Completed state.
+              This field must be cross-referenced with the service type, as the
+              value of 0 may mean the service is not in a job mode, or it may
+              mean the job-mode service has no tasks yet Completed.
+            type: "integer"
+            format: "uint64"
+      JobStatus:
+        description: |
+          The status of the service when it is in one of ReplicatedJob or
+          GlobalJob modes. Absent on Replicated and Global mode services. 
The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job. Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. + type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + x-go-name: "DeleteResponse" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceCreateResponse: + type: "object" + description: | + contains the information returned to a client on the + creation of a new service. + properties: + ID: + description: "The ID of the created service." + type: "string" + x-nullable: false + example: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warnings: + description: | + Optional warning message. + + FIXME(thaJeztah): this should have "omitempty" in the generated type. + type: "array" + x-nullable: true + items: + type: "string" + example: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warnings: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerInspectResponse: + type: "object" + title: "ContainerInspectResponse" + x-go-name: "InspectResponse" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). 
+ type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Created: + description: |- + Date and time at which the container was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2025-02-17T17:43:39.64001363Z" + Path: + description: |- + The path to the command being run + type: "string" + example: "/bin/sh" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + example: + - "-c" + - "exit 9" + State: + $ref: "#/definitions/ContainerState" + Image: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ResolvConfPath: + description: |- + Location of the `/etc/resolv.conf` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" + HostnamePath: + description: |- + Location of the `/etc/hostname` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" + HostsPath: + description: |- + Location of the `/etc/hosts` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" + LogPath: + description: |- + Location of the file used to buffer the container's logs. Depending on + the logging-driver used for the container, this field may be omitted. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + x-nullable: true + example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" + Name: + description: |- + The name associated with this container. + + For historic reasons, the name may be prefixed with a forward-slash (`/`). + type: "string" + example: "/funny_chatelet" + RestartCount: + description: |- + Number of times the container was restarted since it was created, + or since daemon was started. + type: "integer" + example: 0 + Driver: + description: |- + The storage-driver used for the container's filesystem (graph-driver + or snapshotter). + type: "string" + example: "overlayfs" + Platform: + description: |- + The platform (operating system) for which the container was created. + + This field was introduced for the experimental "LCOW" (Linux Containers + On Windows) features, which has been removed. In most cases, this field + is equal to the host's operating system (`linux` or `windows`). + type: "string" + example: "linux" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + description: |- + OCI descriptor of the platform-specific manifest of the image + the container was created from. 
+
+          Note: Only available if the daemon provides a multi-platform
+          image store.
+      MountLabel:
+        description: |-
+          SELinux mount label set for the container.
+        type: "string"
+        example: ""
+      ProcessLabel:
+        description: |-
+          SELinux process label set for the container.
+        type: "string"
+        example: ""
+      AppArmorProfile:
+        description: |-
+          The AppArmor profile set for the container.
+        type: "string"
+        example: ""
+      ExecIDs:
+        description: |-
+          IDs of exec instances that are running in the container.
+        type: "array"
+        items:
+          type: "string"
+        x-nullable: true
+        example:
+          - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca"
+          - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4"
+      HostConfig:
+        $ref: "#/definitions/HostConfig"
+      GraphDriver:
+        $ref: "#/definitions/DriverData"
+        x-nullable: true
+      Storage:
+        $ref: "#/definitions/Storage"
+        x-nullable: true
+      SizeRw:
+        description: |-
+          The size of files that have been created or changed by this container.
+
+          This field is omitted by default, and only set when size is requested
+          in the API request.
+        type: "integer"
+        format: "int64"
+        x-nullable: true
+        example: "122880"
+      SizeRootFs:
+        description: |-
+          The total size of all files in the read-only layers from the image
+          that the container uses. These layers can be shared between containers.
+
+          This field is omitted by default, and only set when size is requested
+          in the API request.
+        type: "integer"
+        format: "int64"
+        x-nullable: true
+        example: "1653948416"
+      Mounts:
+        description: |-
+          List of mounts used by the container.
+        type: "array"
+        items:
+          $ref: "#/definitions/MountPoint"
+      Config:
+        $ref: "#/definitions/ContainerConfig"
+      NetworkSettings:
+        $ref: "#/definitions/NetworkSettings"
+
+  ContainerSummary:
+    type: "object"
+    properties:
+      Id:
+        description: |-
+          The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes).
+        type: "string"
+        x-go-name: "ID"
+        minLength: 64
+        maxLength: 64
+        pattern: "^[0-9a-fA-F]{64}$"
+        example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf"
+      Names:
+        description: |-
+          The names associated with this container. Most containers have a single
+          name, but when using legacy "links", the container can have multiple
+          names.
+
+          For historic reasons, names are prefixed with a forward-slash (`/`).
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "/funny_chatelet"
+      Image:
+        description: |-
+          The name or ID of the image used to create the container.
+
+          This field shows the image reference as was specified when creating the container,
+          which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest`
+          or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`),
+          short form (e.g., `ubuntu:latest`), or the ID(-prefix) of the image (e.g., `72297848456d`).
+
+          The content of this field can be updated at runtime if the image used to
+          create the container is untagged, in which case the field is updated to
+          contain the image ID (digest) it was resolved to in its canonical,
+          non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`).
+        type: "string"
+        example: "docker.io/library/ubuntu:latest"
+      ImageID:
+        description: |-
+          The ID (digest) of the image that this container was created from. 
+ type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + x-nullable: true + description: | + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + + This field is not populated in the `GET /system/df` endpoint. + Command: + description: "Command to run when starting the container" + type: "string" + example: "/bin/bash" + Created: + description: |- + Date and time at which the container was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + format: "int64" + example: "1739811096" + Ports: + description: |- + Port-mappings for the container. + type: "array" + items: + $ref: "#/definitions/PortSummary" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + State: + description: | + The state of this container. + type: "string" + enum: + - "created" + - "running" + - "paused" + - "restarting" + - "exited" + - "removing" + - "dead" + example: "running" + Status: + description: |- + Additional human-readable status of this container (e.g. `Exit 0`) + type: "string" + example: "Up 4 days" + HostConfig: + type: "object" + description: |- + Summary of host-specific runtime information of the container. This + is a reduced set of information in the container's "HostConfig" as + available in the container "inspect" response. + properties: + NetworkMode: + description: |- + Networking mode (`host`, `none`, `container:<id>`) or name of the + primary network the container is using. + + This field is primarily for backward compatibility. The container + can be connected to multiple networks for which information can be + found in the `NetworkSettings.Networks` field, which enumerates + settings per network. + type: "string" + example: "mynetwork" + Annotations: + description: |- + Arbitrary key-value metadata attached to the container. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" + NetworkSettings: + description: |- + Summary of the container's network settings + type: "object" + properties: + Networks: + type: "object" + description: |- + Summary of network-settings for each network the container is + attached to. + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + description: |- + List of mounts used by the container. 
+        items:
+          $ref: "#/definitions/MountPoint"
+      Health:
+        type: "object"
+        description: |-
+          Summary of health status
+
+          Added in v1.52; before that version, container summaries did not include Health.
+          Since its introduction, this field is also populated for containers with no health
+          check configured and for containers that are not running, reporting a status of "none".
+        properties:
+          Status:
+            type: "string"
+            description: |-
+              the health status of the container
+            enum:
+              - "none"
+              - "starting"
+              - "healthy"
+              - "unhealthy"
+            example: "healthy"
+          FailingStreak:
+            description: "FailingStreak is the number of consecutive failures"
+            type: "integer"
+            example: 0
+
+  ContainersDiskUsage:
+    type: "object"
+    x-go-name: "DiskUsage"
+    x-go-package: "github.com/moby/moby/api/types/container"
+    description: |
+      represents system data usage information for container resources.
+    properties:
+      ActiveCount:
+        description: |
+          Count of active containers.
+        type: "integer"
+        format: "int64"
+        example: 1
+      TotalCount:
+        description: |
+          Count of all containers.
+        type: "integer"
+        format: "int64"
+        example: 4
+      Reclaimable:
+        description: |
+          Disk space that can be reclaimed by removing inactive containers.
+        type: "integer"
+        format: "int64"
+        example: 12345678
+      TotalSize:
+        description: |
+          Disk space in use by containers.
+        type: "integer"
+        format: "int64"
+        example: 98765432
+      Items:
+        description: |
+          List of container summaries.
+        type: "array"
+        x-omitempty: true
+        items:
+          x-go-type:
+            type: Summary
+
+  Driver:
+    description: "Driver represents a driver (network, logging, secrets)."
+    type: "object"
+    required: [Name]
+    properties:
+      Name:
+        description: "Name of the driver."
+        type: "string"
+        x-nullable: false
+        example: "some-driver"
+      Options:
+        description: "Key/value map of driver-specific options."
+        type: "object"
+        x-nullable: false
+        additionalProperties:
+          type: "string"
+        example:
+          OptionA: "value for driver-specific option A"
+          OptionB: "value for driver-specific option B"
+
+  SecretSpec:
+    type: "object"
+    properties:
+      Name:
+        description: "User-defined name of the secret."
+        type: "string"
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        additionalProperties:
+          type: "string"
+        example:
+          com.example.some-label: "some-value"
+          com.example.some-other-label: "some-other-value"
+      Data:
+        description: |
+          Data is the data to store as a secret, formatted as a standard base64-encoded
+          ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string.
+          It must be empty if the Driver field is set, in which case the data is
+          loaded from an external secret store. The maximum allowed size is 500KB,
+          as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize).
+
+          This field is only used to _create_ a secret, and is not returned by
+          other endpoints.
+        type: "string"
+        example: ""
+      Driver:
+        description: |
+          Name of the secrets driver used to fetch the secret's value from an
+          external secret store.
+        $ref: "#/definitions/Driver"
+      Templating:
+        description: |
+          Templating driver, if applicable
+
+          Templating controls whether and how to evaluate the config payload as
+          a template. If no driver is set, no templating is used. 
+ $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether a process within this container has been killed because it ran + out of memory since the container was last started. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." 
+ type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerUpdateResponse: + type: "object" + title: "ContainerUpdateResponse" + x-go-name: "UpdateResponse" + description: |- + Response for a successful container-update. + properties: + Warnings: + type: "array" + description: |- + Warnings encountered when updating the container. + items: + type: "string" + example: ["Published ports are discarded when using host network mode"] + + ContainerStatsResponse: + description: | + Statistics sample for a container. + type: "object" + x-go-name: "StatsResponse" + title: "ContainerStatsResponse" + properties: + id: + description: | + ID of the container for which the stats were collected. + type: "string" + x-nullable: true + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + name: + description: | + Name of the container for which the stats were collected. + type: "string" + x-nullable: true + example: "boring_wozniak" + os_type: + description: | + OSType is the OS of the container ("linux" or "windows") to allow + platform-specific handling of stats. + type: "string" + x-nullable: true + example: "linux" + read: + description: | + Date and time at which this sample was collected. + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:22.165243637Z" + cpu_stats: + $ref: "#/definitions/ContainerCPUStats" + memory_stats: + $ref: "#/definitions/ContainerMemoryStats" + networks: + description: | + Network statistics for the container per interface. + + This field is omitted if the container has no networking enabled. + x-nullable: true + additionalProperties: + $ref: "#/definitions/ContainerNetworkStats" + example: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + pids_stats: + $ref: "#/definitions/ContainerPidsStats" + blkio_stats: + $ref: "#/definitions/ContainerBlkioStats" + num_procs: + description: | + The number of processors on the system. + + This field is Windows-specific and always zero for Linux containers. + type: "integer" + format: "uint32" + example: 16 + storage_stats: + $ref: "#/definitions/ContainerStorageStats" + preread: + description: | + Date and time at which this first sample was collected. This field + is not propagated if the "one-shot" option is set. If the "one-shot" + option is set, this field may be omitted, empty, or set to a default + date (`0001-01-01T00:00:00Z`). + + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. 
+ type: "string" + format: "date-time" + example: "2025-01-16T13:55:21.160452595Z" + precpu_stats: + $ref: "#/definitions/ContainerCPUStats" + + ContainerBlkioStats: + description: | + BlkioStats stores all IO service stats for data read and write. + + This type is Linux-specific and holds many fields that are specific to cgroups v1. + On a cgroup v2 host, all fields other than `io_service_bytes_recursive` + are omitted or `null`. + + This type is only populated on Linux and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStats" + x-nullable: true + properties: + io_service_bytes_recursive: + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_serviced_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_queue_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_service_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_wait_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_merged_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + sectors_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + example: + io_service_bytes_recursive: [ + {"major": 254, "minor": 0, "op": "read", "value": 7593984}, + {"major": 254, "minor": 0, "op": "write", "value": 100} + ] + io_serviced_recursive: null + io_queue_recursive: null + io_service_time_recursive: null + io_wait_time_recursive: null + io_merged_recursive: null + io_time_recursive: null + sectors_recursive: null + + ContainerBlkioStatEntry: + description: | + Blkio stats entry. + + This type is Linux-specific and omitted for Windows containers. 
+ type: "object" + x-go-name: "BlkioStatEntry" + x-nullable: true + properties: + major: + type: "integer" + format: "uint64" + example: 254 + minor: + type: "integer" + format: "uint64" + example: 0 + op: + type: "string" + example: "read" + value: + type: "integer" + format: "uint64" + example: 7593984 + + ContainerCPUStats: + description: | + CPU related info of the container + type: "object" + x-go-name: "CPUStats" + x-nullable: true + properties: + cpu_usage: + $ref: "#/definitions/ContainerCPUUsage" + system_cpu_usage: + description: | + System Usage. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + online_cpus: + description: | + Number of online CPUs. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint32" + x-nullable: true + example: 5 + throttling_data: + $ref: "#/definitions/ContainerThrottlingData" + + ContainerCPUUsage: + description: | + All CPU stats aggregated since container inception. + type: "object" + x-go-name: "CPUUsage" + x-nullable: true + properties: + total_usage: + description: | + Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). + type: "integer" + format: "uint64" + example: 29912000 + percpu_usage: + description: | + Total CPU time (in nanoseconds) consumed per core (Linux). + + This field is Linux-specific when using cgroups v1. It is omitted + when using cgroups v2 and Windows containers. + type: "array" + x-nullable: true + items: + type: "integer" + format: "uint64" + example: 29912000 + + usage_in_kernelmode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 21994000 + usage_in_usermode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 7918000 + + ContainerPidsStats: + description: | + PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "PidsStats" + x-nullable: true + properties: + current: + description: | + Current is the number of PIDs in the cgroup. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + limit: + description: | + Limit is the hard limit on the number of pids in the cgroup. + A "Limit" of 0 means that there is no limit. + type: "integer" + format: "uint64" + x-nullable: true + example: "18446744073709551615" + + ContainerThrottlingData: + description: | + CPU throttling stats of the container. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "ThrottlingData" + x-nullable: true + properties: + periods: + description: | + Number of periods with throttling active. + type: "integer" + format: "uint64" + example: 0 + throttled_periods: + description: | + Number of periods when the container hit its throttling limit. + type: "integer" + format: "uint64" + example: 0 + throttled_time: + description: | + Aggregated time (in nanoseconds) the container was throttled for. 
+ type: "integer" + format: "uint64" + example: 0 + + ContainerMemoryStats: + description: | + Aggregates all memory stats since container inception on Linux. + Windows returns stats for commit and private working set only. + type: "object" + x-go-name: "MemoryStats" + properties: + usage: + description: | + Current `res_counter` usage for memory. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + max_usage: + description: | + Maximum usage ever recorded. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + stats: + description: | + All the stats exported via memory.stat. + + The fields in this object differ between cgroups v1 and v2. + On cgroups v1, fields such as `cache`, `rss`, `mapped_file` are available. + On cgroups v2, fields such as `file`, `anon`, `inactive_file` are available. + + This field is Linux-specific and omitted for Windows containers. + type: "object" + additionalProperties: + type: "integer" + format: "uint64" + x-nullable: true + example: + { + "active_anon": 1572864, + "active_file": 5115904, + "anon": 1572864, + "anon_thp": 0, + "file": 7626752, + "file_dirty": 0, + "file_mapped": 2723840, + "file_writeback": 0, + "inactive_anon": 0, + "inactive_file": 2510848, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 2042, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 45, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 1180928, + "slab_reclaimable": 725576, + "slab_unreclaimable": 455352, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 1, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + } + failcnt: + description: | + Number of times memory usage hits limits. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + limit: + description: | + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 8217579520 + commitbytes: + description: | + Committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + commitpeakbytes: + description: | + Peak committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + privateworkingset: + description: | + Private working set. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + + ContainerNetworkStats: + description: | + Aggregates the network stats of one container + type: "object" + x-go-name: "NetworkStats" + x-nullable: true + properties: + rx_bytes: + description: | + Bytes received. Windows and Linux. + type: "integer" + format: "uint64" + example: 5338 + rx_packets: + description: | + Packets received. Windows and Linux. + type: "integer" + format: "uint64" + example: 36 + rx_errors: + description: | + Received errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. 
+ type: "integer" + format: "uint64" + example: 0 + rx_dropped: + description: | + Incoming packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + tx_bytes: + description: | + Bytes sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 1200 + tx_packets: + description: | + Packets sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 12 + tx_errors: + description: | + Sent errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + tx_dropped: + description: | + Outgoing packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + endpoint_id: + description: | + Endpoint ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + instance_id: + description: | + Instance ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + + ContainerStorageStats: + description: | + StorageStats is the disk I/O stats for read/write on Windows. + + This type is Windows-specific and omitted for Linux containers. + type: "object" + x-go-name: "StorageStats" + x-nullable: true + properties: + read_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + read_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + + ContainerTopResponse: + type: "object" + x-go-name: "TopResponse" + title: "ContainerTopResponse" + description: |- + Container "top" response. + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + example: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + description: |- + Each process running in the container, where each process + is an array of values corresponding to the titles. 
+ type: "array" + items: + type: "array" + items: + type: "string" + example: + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "27.0.1" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "27.0.1" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.47" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.24" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.22.7" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "6.8.0-31-generic" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. 
+ + <p><br /></p> + + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + + <p><br /></p> + + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. 
+ type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. + type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information obtained from `uname`. On Windows this + information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd> + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "6.8.0-31-generic" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 24.04 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Ubuntu 24.04 LTS" + OSVersion: + description: | + Version of the host's operating system + + <p><br /></p> + + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "24.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. + + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. 
+ Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + + <p><br /></p> + + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + type: "string" + example: "27.0.1" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shutdown + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. 
+
+ If no isolation value is specified on daemon start, on Windows client,
+ the default is `hyperv`, and on Windows server, the default is `process`.
+
+ This option is currently not used on other platforms.
+ default: "default"
+ type: "string"
+ enum:
+ - "default"
+ - "hyperv"
+ - "process"
+ - ""
+ InitBinary:
+ description: |
+ Name and, optionally, path of the `docker-init` binary.
+
+ If the path is omitted, the daemon searches the host's `$PATH` for the
+ binary and uses the first result.
+ type: "string"
+ example: "docker-init"
+ ContainerdCommit:
+ $ref: "#/definitions/Commit"
+ RuncCommit:
+ $ref: "#/definitions/Commit"
+ InitCommit:
+ $ref: "#/definitions/Commit"
+ SecurityOptions:
+ description: |
+ List of security features that are enabled on the daemon, such as
+ apparmor, seccomp, SELinux, user-namespaces (userns), rootless and
+ no-new-privileges.
+
+ Additional configuration options for each security feature may
+ be present, and are included as a comma-separated list of key/value
+ pairs.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "name=apparmor"
+ - "name=seccomp,profile=default"
+ - "name=selinux"
+ - "name=userns"
+ - "name=rootless"
+ ProductLicense:
+ description: |
+ Reports a summary of the product license on the daemon.
+
+ If a commercial license has been applied to the daemon, information
+ such as the number of nodes and expiration date is included.
+ type: "string"
+ example: "Community Engine"
+ DefaultAddressPools:
+ description: |
+ List of custom default address pools for local networks, which can be
+ specified in the daemon.json file or via a dockerd option.
+
+ Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256
+ 10.10.[0-255].0/24 address pools.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Base:
+ description: "The network address in CIDR format"
+ type: "string"
+ example: "10.10.0.0/16"
+ Size:
+ description: "The network pool size"
+ type: "integer"
+ example: "24"
+ FirewallBackend:
+ $ref: "#/definitions/FirewallInfo"
+ DiscoveredDevices:
+ description: |
+ List of devices discovered by device drivers.
+
+ Each device includes information about its source driver, kind, name,
+ and additional driver-specific attributes.
+ type: "array"
+ items:
+ $ref: "#/definitions/DeviceInfo"
+ NRI:
+ $ref: "#/definitions/NRIInfo"
+ Warnings:
+ description: |
+ List of warnings / informational messages about missing features, or
+ issues related to the daemon configuration.
+
+ These messages can be printed by the client as information to the user.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "WARNING: No memory limit support"
+ CDISpecDirs:
+ description: |
+ List of directories where CDI (Container Device Interface)
+ specifications are located.
+
+ These specifications define vendor-specific modifications to an OCI
+ runtime specification for a container being created.
+
+ An empty list indicates that CDI device injection is disabled.
+
+ Note that using CDI device injection requires the daemon to have
+ experimental features enabled. For non-experimental daemons, an empty
+ list is always returned.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "/etc/cdi"
+ - "/var/run/cdi"
+ Containerd:
+ $ref: "#/definitions/ContainerdInfo"
+
+ ContainerdInfo:
+ description: |
+ Information for connecting to the containerd instance that is used by the daemon.
+ This is included for debugging purposes only.
+ type: "object"
+ x-nullable: true
+ properties:
+ Address:
+ description: "The address of the containerd socket."
+ type: "string"
+ example: "/run/containerd/containerd.sock"
+ Namespaces:
+ description: |
+ The namespaces that the daemon uses for running containers and
+ plugins in containerd. These namespaces can be configured in the
+ daemon configuration, and are considered to be used exclusively
+ by the daemon. Tampering with the containerd instance may cause
+ unexpected behavior.
+
+ As these namespaces are considered to be exclusively accessed
+ by the daemon, it is not recommended to change these values,
+ or to change them to a value that is used by other systems,
+ such as cri-containerd.
+ type: "object"
+ properties:
+ Containers:
+ description: |
+ The default containerd namespace used for containers managed
+ by the daemon.
+
+ The default namespace for containers is "moby", but will be
+ suffixed with the `<uid>.<gid>` of the remapped `root` if
+ user-namespaces are enabled and the containerd image-store
+ is used.
+ type: "string"
+ default: "moby"
+ example: "moby"
+ Plugins:
+ description: |
+ The default containerd namespace used for plugins managed by
+ the daemon.
+
+ The default namespace for plugins is "plugins.moby", but will be
+ suffixed with the `<uid>.<gid>` of the remapped `root` if
+ user-namespaces are enabled and the containerd image-store
+ is used.
+ type: "string"
+ default: "plugins.moby"
+ example: "plugins.moby"
+
+ FirewallInfo:
+ description: |
+ Information about the daemon's firewalling configuration.
+
+ This field is currently only used on Linux, and omitted on other platforms.
+ type: "object"
+ x-nullable: true
+ properties:
+ Driver:
+ description: |
+ The name of the firewall backend driver.
+ type: "string"
+ example: "nftables"
+ Info:
+ description: |
+ Information about the firewall backend, provided as
+ "label" / "value" pairs.
+
+ <p><br /></p>
+
+ > **Note**: The information returned in this field, including the
+ > formatting of values and labels, should not be considered stable,
+ > and may change without notice.
+ type: "array"
+ items:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - ["ReloadedAt", "2025-01-01T00:00:00Z"]
+
+ # PluginsInfo is a temporary struct holding the names of plugins
+ # registered with the docker daemon. It is used by the Info struct.
+ PluginsInfo:
+ description: |
+ Available plugins per type.
+
+ <p><br /></p>
+
+ > **Note**: Only unmanaged (V1) plugins are included in this list.
+ > V1 plugins are "lazily" loaded, and are not returned in this list
+ > if there is no resource using the plugin.
+ type: "object"
+ properties:
+ Volume:
+ description: "Names of available volume-drivers, and volume-driver plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["local"]
+ Network:
+ description: "Names of available network-drivers, and network-driver plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]
+ Authorization:
+ description: "Names of available authorization plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["img-authz-plugin", "hbm"]
+ Log:
+ description: "Names of available logging-drivers, and logging-driver plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"]
+
+
+ RegistryServiceConfig:
+ description: |
+ RegistryServiceConfig stores daemon registry services configuration.
+ type: "object"
+ x-nullable: true
+ properties:
+ InsecureRegistryCIDRs:
+ description: |
+ List of IP ranges of insecure registries, using the CIDR syntax
+ ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries
+ accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
+ from unknown CAs) communication.
+
+ By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as
+ insecure. All other registries are secure. Communicating with an
+ insecure registry is not possible if the daemon assumes that registry
+ is secure.
+
+ This configuration overrides that behavior, allowing insecure communication
+ with registries whose resolved IP address is within the subnet described by
+ the CIDR syntax.
+
+ Registries can also be marked insecure by hostname. Those registries
+ are listed under `IndexConfigs` and have their `Secure` field set to
+ `false`.
+
+ > **Warning**: Using this option can be useful when running a local
+ > registry, but introduces security vulnerabilities. This option
+ > should therefore ONLY be used for testing purposes. For increased
+ > security, users should add their CA to their system's list of trusted
+ > CAs instead of enabling this option.
+ type: "array"
+ items:
+ type: "string"
+ example: ["::1/128", "127.0.0.0/8"]
+ IndexConfigs:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/IndexInfo"
+ example:
+ "127.0.0.1:5000":
+ "Name": "127.0.0.1:5000"
+ "Mirrors": []
+ "Secure": false
+ "Official": false
+ "[2001:db8:a0b:12f0::1]:80":
+ "Name": "[2001:db8:a0b:12f0::1]:80"
+ "Mirrors": []
+ "Secure": false
+ "Official": false
+ "docker.io":
+ Name: "docker.io"
+ Mirrors: ["https://hub-mirror.corp.example.com:5000/"]
+ Secure: true
+ Official: true
+ "registry.internal.corp.example.com:3000":
+ Name: "registry.internal.corp.example.com:3000"
+ Mirrors: []
+ Secure: false
+ Official: false
+ Mirrors:
+ description: |
+ List of registry URLs that act as a mirror for the official
+ (`docker.io`) registry.
+
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "https://hub-mirror.corp.example.com:5000/"
+ - "https://[2001:db8:a0b:12f0::1]/"
+
+ IndexInfo:
+ description:
+ IndexInfo contains information about a registry.
+ type: "object"
+ x-nullable: true
+ properties:
+ Name:
+ description: |
+ Name of the registry, such as "docker.io".
+ type: "string"
+ example: "docker.io"
+ Mirrors:
+ description: |
+ List of mirrors, expressed as URIs.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "https://hub-mirror.corp.example.com:5000/"
+ - "https://registry-2.docker.io/"
+ - "https://registry-3.docker.io/"
+ Secure:
+ description: |
+ Indicates if the registry is a secure registry (that is, not part of
+ the list of insecure registries).
+
+ If `false`, the registry is insecure. Insecure registries accept
+ un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from
+ unknown CAs) communication.
+
+ > **Warning**: Insecure registries can be useful when running a local
+ > registry. However, because its use creates security vulnerabilities,
+ > it should ONLY be enabled for testing purposes. For increased
+ > security, users should add their CA to their system's list of
+ > trusted CAs instead of enabling this option.
+ type: "boolean"
+ example: true
+ Official:
+ description: |
+ Indicates whether this is an official registry (i.e., Docker Hub / docker.io).
+ type: "boolean"
+ example: true
+
+ Runtime:
+ description: |
+ Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec)
+ runtime.
+
+ The runtime is invoked by the daemon via the `containerd` daemon. OCI
+ runtimes act as an interface to the Linux kernel namespaces, cgroups,
+ and SELinux.
+ type: "object"
+ properties:
+ path:
+ description: |
+ Name and, optionally, path of the OCI executable binary.
+
+ If the path is omitted, the daemon searches the host's `$PATH` for the
+ binary and uses the first result.
+ type: "string"
+ example: "/usr/local/bin/my-oci-runtime"
+ runtimeArgs:
+ description: |
+ List of command-line arguments to pass to the runtime when invoked.
+ type: "array"
+ x-nullable: true
+ items:
+ type: "string"
+ example: ["--debug", "--systemd-cgroup=false"]
+ status:
+ description: |
+ Information specific to the runtime.
+
+ While this API specification does not define data provided by runtimes,
+ the following well-known properties may be provided by runtimes:
+
+ `org.opencontainers.runtime-spec.features`: features structure as defined
+ in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md),
+ in a JSON string representation.
+
+ <p><br /></p>
+
+ > **Note**: The information returned in this field, including the
+ > formatting of values and labels, should not be considered stable,
+ > and may change without notice.
+ type: "object"
+ x-nullable: true
+ additionalProperties:
+ type: "string"
+ example:
+ "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}"
+
+ Commit:
+ description: |
+ Commit holds the Git-commit (SHA1) that a binary was built from, as
+ reported in the version-string of external tools, such as `containerd`,
+ or `runC`.
+ type: "object"
+ properties:
+ ID:
+ description: "Actual commit ID of external tool."
+ type: "string"
+ example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
+
+ SwarmInfo:
+ description: |
+ Represents generic information about the swarm.
+ type: "object"
+ properties:
+ NodeID:
+ description: "Unique identifier for this node in the swarm."
+ type: "string"
+ default: ""
+ example: "k67qz4598weg5unwwffg6z1m1"
+ NodeAddr:
+ description: |
+ IP address at which this node can be reached by other nodes in the
+ swarm.
+ type: "string"
+ default: ""
+ example: "10.0.0.46"
+ LocalNodeState:
+ $ref: "#/definitions/LocalNodeState"
+ ControlAvailable:
+ type: "boolean"
+ default: false
+ example: true
+ Error:
+ type: "string"
+ default: ""
+ RemoteManagers:
+ description: |
+ List of IDs and addresses of other managers in the swarm.
+ type: "array"
+ default: null
+ x-nullable: true
+ items:
+ $ref: "#/definitions/PeerNode"
+ example:
+ - NodeID: "71izy0goik036k48jg985xnds"
+ Addr: "10.0.0.158:2377"
+ - NodeID: "79y6h1o4gv8n120drcprv5nmc"
+ Addr: "10.0.0.159:2377"
+ - NodeID: "k67qz4598weg5unwwffg6z1m1"
+ Addr: "10.0.0.46:2377"
+ Nodes:
+ description: "Total number of nodes in the swarm."
+ type: "integer"
+ x-nullable: true
+ example: 4
+ Managers:
+ description: "Total number of managers in the swarm."
+ type: "integer"
+ x-nullable: true
+ example: 3
+ Cluster:
+ $ref: "#/definitions/ClusterInfo"
+
+ LocalNodeState:
+ description: "Current local status of this node."
+ type: "string"
+ default: ""
+ enum:
+ - ""
+ - "inactive"
+ - "pending"
+ - "active"
+ - "error"
+ - "locked"
+ example: "active"
+
+ PeerNode:
+ description: "Represents a peer-node in the swarm."
+ type: "object"
+ properties:
+ NodeID:
+ description: "Unique identifier for this node in the swarm."
+ type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. + type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.oci.image.manifest.v1+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 424 + urls: + description: |- + List of URLs from which this object MAY be downloaded. + type: "array" + items: + type: "string" + format: "uri" + x-nullable: true + annotations: + description: |- + Arbitrary metadata relating to the targeted content. 
+ type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "com.docker.official-images.bashbrew.arch": "amd64" + "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" + "org.opencontainers.image.base.name": "scratch" + "org.opencontainers.image.created": "2025-01-27T00:00:00Z" + "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" + "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" + "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" + "org.opencontainers.image.version": "24.04" + data: + type: string + x-nullable: true + description: |- + Data is an embedding of the targeted content. This is encoded as a base64 + string when marshalled to JSON (automatically, by encoding/json). If + present, Data can be used directly to avoid fetching the targeted content. + example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. + type: "string" + x-nullable: true + example: null + + OCIPlatform: + type: "object" + x-go-name: Platform + x-nullable: true + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. 
A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. 
+ default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + + ImageManifestSummary: + x-go-name: "ManifestSummary" + description: | + ImageManifestSummary represents a summary of an image manifest. + type: "object" + required: ["ID", "Descriptor", "Available", "Size", "Kind"] + properties: + ID: + description: | + ID is the content-addressable ID of an image and is the same as the + digest of the image manifest. 
+ type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Available: + description: Indicates whether all the child content (image config, layers) is fully available locally. + type: "boolean" + example: true + Size: + type: "object" + x-nullable: false + required: ["Content", "Total"] + properties: + Total: + type: "integer" + format: "int64" + example: 8213251 + description: | + Total is the total size (in bytes) of all the locally present + data (both distributable and non-distributable) that's related to + this manifest and its children. + This equal to the sum of [Content] size AND all the sizes in the + [Size] struct present in the Kind-specific data struct. + For example, for an image kind (Kind == "image") + this would include the size of the image content and unpacked + image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Content: + description: | + Content is the size (in bytes) of all the locally present + content in the content store (e.g. image config, layers) + referenced by this manifest and its children. + This only includes blobs in the content store. + type: "integer" + format: "int64" + example: 3987495 + Kind: + type: "string" + example: "image" + enum: + - "image" + - "attestation" + - "unknown" + description: | + The kind of the manifest. + + kind | description + -------------|----------------------------------------------------------- + image | Image manifest that can be used to start a container. + attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. + ImageData: + description: | + The image data for the image manifest. + This field is only populated when Kind is "image". + type: "object" + x-nullable: true + x-omitempty: true + required: ["Platform", "Containers", "Size", "UnpackedSize"] + properties: + Platform: + $ref: "#/definitions/OCIPlatform" + description: | + OCI platform of the image. This will be the platform specified in the + manifest descriptor from the index/manifest list. + If it's not available, it will be obtained from the image config. + Containers: + description: | + The IDs of the containers that are using this image. + type: "array" + items: + type: "string" + example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] + Size: + type: "object" + x-nullable: false + required: ["Unpacked"] + properties: + Unpacked: + type: "integer" + format: "int64" + example: 3987495 + description: | + Unpacked is the size (in bytes) of the locally unpacked + (uncompressed) image content that's directly usable by the containers + running this image. + It's independent of the distributable content - e.g. + the image might still have an unpacked data that's still used by + some container even when the distributable/compressed content is + already gone. + AttestationData: + description: | + The image data for the attestation manifest. + This field is only populated when Kind is "attestation". + type: "object" + x-nullable: true + x-omitempty: true + required: ["For"] + properties: + For: + description: | + The digest of the image manifest that this attestation is for. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. 
For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated . + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers. + + Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. 
However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example; + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + required: true + responses: + 201: + description: "Container created successfully" + schema: + $ref: "#/definitions/ContainerCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" 
+ description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerInspectResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerTopResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. + schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + $ref: "#/definitions/FilesystemChange" + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
+ + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerStatsResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-<value>` where `<value>` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
+ type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: | + Send a POSIX signal to a container, defaulting to killing to the + container. + operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." 
+ schema: + $ref: "#/definitions/ContainerUpdateResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. 
+
+ See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/)
+ for more details.
+
+ ### Hijacking
+
+ This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`,
+ and `stderr` on the same socket.
+
+ This is the response from the daemon for an attach request:
+
+ ```
+ HTTP/1.1 200 OK
+ Content-Type: application/vnd.docker.raw-stream
+
+ [STREAM]
+ ```
+
+ After the headers and two new lines, the TCP connection can now be used
+ for raw, bidirectional communication between the client and server.
+
+ To hint potential proxies about connection hijacking, the Docker client
+ can also optionally send connection upgrade headers.
+
+ For example, the client sends this request to upgrade the connection:
+
+ ```
+ POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1
+ Upgrade: tcp
+ Connection: Upgrade
+ ```
+
+ The Docker daemon will respond with a `101 UPGRADED` response, and will
+ similarly follow with the raw stream:
+
+ ```
+ HTTP/1.1 101 UPGRADED
+ Content-Type: application/vnd.docker.raw-stream
+ Connection: Upgrade
+ Upgrade: tcp
+
+ [STREAM]
+ ```
+
+ ### Stream format
+
+ When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate),
+ the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream
+ and the stream over the hijacked connection is multiplexed to separate out
+ `stdout` and `stderr`. The stream consists of a series of frames, each
+ containing a header and a payload.
+
+ The header contains the information which the stream writes (`stdout` or
+ `stderr`). It also contains the size of the associated frame encoded in
+ the last four bytes (`uint32`).
+
+ It is encoded on the first eight bytes like this:
+
+ ```go
+ header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+ ```
+
+ `STREAM_TYPE` can be:
+
+ - 0: `stdin` (is written on `stdout`)
+ - 1: `stdout`
+ - 2: `stderr`
+
+ `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size
+ encoded as big endian.
+
+ Following the header is the payload, which is the specified number of
+ bytes of `STREAM_TYPE`.
+
+ The simplest way to implement this protocol is the following:
+
+ 1. Read 8 bytes.
+ 2. Choose `stdout` or `stderr` depending on the first byte.
+ 3. Extract the frame size from the last four bytes.
+ 4. Read the extracted size and output it on the correct output.
+ 5. Goto 1.
+
+ ### Stream format when using a TTY
+
+ When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate),
+ the stream is not multiplexed. The data exchanged over the hijacked
+ connection is simply the raw data from the process PTY and client's
+ `stdin`.
+ + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards. + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." 
+ operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64 - encoded JSON object with some filesystem header information + about the path. + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64 - encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." 
+ operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. + type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: + + - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`<image-name>[:<tag>]`) + - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `until=<timestamp>` + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. 
If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. 
Supported + standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` + type: "string" + default: "" + - name: "version" + in: "query" + type: "string" + default: "1" + enum: ["1", "2"] + description: | + Version of the builder backend to use. + + - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) + - `2` is [BuildKit](https://github.com/moby/buildkit) + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "reserved-space" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "max-used-space" + in: "query" + description: "Maximum amount of disk space allowed to keep for cache" + type: "integer" + format: "int64" + - name: "min-free-space" + in: "query" + description: "Target amount of free disk space after pruning" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=<timestamp>` remove cache older than `<timestamp>`. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 
`10m`, `1h30m`) computed relative to the daemon's local time. + - `id=<id>` + - `parent=<id>` + - `type=<string>` + - `description=<string>` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Pull or import an image." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: | + Name of the image to pull. If the name includes a tag or digest, specific behavior applies: + + - If only `fromImage` includes a tag, that tag is used. + - If both `fromImage` and `tag` are provided, `tag` takes precedence. + - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. + - If neither a tag nor digest is specified, all tags are pulled. + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. 
+ If the given image does exists in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/ImageInspect" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + - name: "manifests" + in: "query" + description: |- + Include Manifests in the image summary. + + The `manifests` and `platform` options are mutually exclusive, and + an error is produced if both are set. + type: "boolean" + default: false + required: false + - name: "platform" + type: "string" + in: "query" + description: |- + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show inspect. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + The `platform` and `manifests` options are mutually exclusive, and + an error is produced if both are set. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
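+ # Illustrative usage, not part of the upstream spec: the layer history of a
+ # local image can be fetched with a plain GET over the daemon socket, e.g.:
+ #   curl --unix-socket /var/run/docker.sock http://localhost/images/ubuntu:latest/history
+ # The response is a JSON array of ImageHistoryResponseItem objects (see the example below).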
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + $ref: "#/definitions/ImageHistoryResponseItem" + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show the history for. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + Name of the image to push. For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. + type: "string" + required: true + - name: "tag" + in: "query" + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. 
+ + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + - name: "platforms" + in: "query" + description: | + Select platform-specific content to delete. + Multiple values are accepted. + Each platform is a OCI platform encoded as a JSON string. + type: "array" + items: + # This should be OCIPlatform + # but $ref is not supported for array in query in Swagger 2.0 + # $ref: "#/definitions/OCIPlatform" + type: "string" + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." 
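+ # Illustrative usage, not part of the upstream spec: searching Docker Hub for up to
+ # three "alpine" results could look like:
+ #   curl --unix-socket /var/run/docker.sock "http://localhost/images/search?term=alpine&limit=3"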
+ operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + description: | + Whether this repository has automated builds enabled. + + <p><br /></p> + + > **Deprecated**: This field is deprecated and will always be "false". + type: "boolean" + example: false + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true + is_automated: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true + is_automated: false + name: "Busybox base image." + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true + is_automated: false + name: "postgres" + star_count: 12408 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-official=(true|false)` + - `stars=<number>` Matches images that has at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." 
+ schema: + $ref: "#/definitions/AuthResponse" + 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
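+ # Illustrative usage, not part of the upstream spec: the HEAD variant returns the
+ # same headers as GET /_ping without a response body, for example:
+ #   curl --head --unix-socket /var/run/docker.sock http://localhost/_ping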
+ operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith <hannibal@a-team.com>`)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/jsonl" + - "application/x-ndjson" + - "application/json-seq" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=<string>` config name or ID + - `container=<string>` container name or ID + - `daemon=<string>` daemon name or ID + - `event=<string>` event type + - `image=<string>` image name or ID + - `label=<string>` image or container label + - `network=<string>` network name or ID + - `node=<string>` node ID + - `plugin`=<string> plugin name or ID + - `scope`=<string> local or swarm + - `secret=<string>` secret name or ID + - `service=<string>` service name or ID + - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=<string>` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + ImageUsage: + $ref: "#/definitions/ImagesDiskUsage" + ContainerUsage: + $ref: "#/definitions/ContainersDiskUsage" + VolumeUsage: + $ref: "#/definitions/VolumesDiskUsage" + BuildCacheUsage: + $ref: "#/definitions/BuildCacheDiskUsage" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + - name: "verbose" + in: "query" + description: | + Show detailed information on space usage. + type: "boolean" + default: false + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. 
+ + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). + + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "array" + items: + type: "string" + collectionFormat: "multi" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + - name: "platform" + type: "array" + items: + type: "string" + collectionFormat: "multi" + in: "query" + description: | + JSON encoded OCI platform(s) which will be used to select the + platform-specific image(s) to be saved if the image is + multi-platform. If not provided, the full multi-platform image + will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see the [export image endpoint](#operation/ImageGet). 
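+ # Illustrative usage, not part of the upstream spec: an archive produced by
+ # `docker save` (or by GET /images/get) can be loaded back with:
+ #   curl --unix-socket /var/run/docker.sock -X POST \
+ #        -H "Content-Type: application/x-tar" --data-binary @images.tar \
+ #        http://localhost/images/load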
+ operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + - name: "platform" + type: "array" + items: + type: "string" + collectionFormat: "multi" + in: "query" + description: | + JSON encoded OCI platform(s) which will be used to select the + platform-specific image(s) to load if the image is + multi-platform. If not provided, the full multi-platform image + will be loaded. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + title: "ExecConfig" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-<value>` where `<value>` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container. 
+ example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + example: false + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: true + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + $ref: "#/definitions/VolumeListResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=<volume-driver-name>` Matches volumes based on their driver. + - `label=<key>` or `label=<key>:<value>` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=<volume-name>` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + $ref: "#/definitions/VolumeCreateRequest" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + put: + summary: | + "Update a volume. 
Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). 
+ + Note that it uses a different, smaller representation of a network than + inspecting a single network. For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/NetworkSummary" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv4: true + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=<driver-name>` Matches a network's driver. + - `id=<network-id>` Matches all or part of a network ID. + - `label=<key>` or `label=<key>=<value>` of a network label. + - `name=<network-name>` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
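+ # Illustrative filter value, not part of the upstream spec: listing only
+ # user-defined bridge networks could look like:
+ #   GET /networks?filters={"driver":["bridge"],"type":["custom"]}
+ # (the filters value is URL-encoded in practice).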
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/NetworkInspect" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "Network created successfully" + schema: + $ref: "#/definitions/NetworkCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + example: "my_network" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + example: "bridge" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level). + type: "string" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + example: true + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + example: false + ConfigOnly: + description: | + Creates a config-only network. Config-only networks are placeholder + networks for network configurations to be used by other networks. + Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + example: false + ConfigFrom: + description: | + Specifies the source which will provide the configuration for + this network. 
The specified network must be an existing + config-only network; see ConfigOnly. + $ref: "#/definitions/ConfigReference" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv4: + description: "Enable IPv4 on the network." + type: "boolean" + example: true + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + example: true + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Operation forbidden" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + $ref: "#/definitions/NetworkConnectRequest" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + $ref: "#/definitions/NetworkDisconnectRequest" + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=<capability name>` + - `enable=<true>|<false>` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=<node id>` + - `label=<engine label>` + - `membership=`(`accepted`|`pending`)` + - `name=<node name>` + - `node.label=<node label>` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes. 
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + `<ip|interface>`), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + `<ip|interface>`), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathAddr: "192.168.1.1" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=<service id>` + - `label=<service label>` + - `mode=["replicated"|"global"]` + - `name=<service name>` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/ServiceCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + OomScoreAdj: 0 + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + OomScoreAdj: 0 + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: 
"60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=<task id>` + - `label=key` or `label="key=value"` + - `name=<task name>` + - `node=<node id or name>` + - `service=<service name>` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=<secret id>` + - `label=<key> or label=<key>=value` + - `name=<secret name>` + - `names=<secret name>` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. 
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=<config id>` + - `label=<key> or label=<key>=value` + - `name=<config name>` + - `names=<config name>` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 
404:
+          description: "no such config"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          description: "The ID or name of the config"
+          type: "string"
+          required: true
+        - name: "body"
+          in: "body"
+          schema:
+            $ref: "#/definitions/ConfigSpec"
+          description: |
+            The spec of the config to update. Currently, only the Labels field
+            can be updated. All other fields must remain unchanged from the
+            [ConfigInspect endpoint](#operation/ConfigInspect) response values.
+        - name: "version"
+          in: "query"
+          description: |
+            The version number of the config object being updated. This is
+            required to avoid conflicting writes.
+          type: "integer"
+          format: "int64"
+          required: true
+      tags: ["Config"]
+  /distribution/{name}/json:
+    get:
+      summary: "Get image information from the registry"
+      description: |
+        Return image digest and platform information by contacting the registry.
+      operationId: "DistributionInspect"
+      produces:
+        - "application/json"
+      responses:
+        200:
+          description: "descriptor and platform information"
+          schema:
+            $ref: "#/definitions/DistributionInspect"
+        401:
+          description: "Failed authentication or no image found"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such image: someimage (tag: latest)"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "name"
+          in: "path"
+          description: "Image name or id"
+          type: "string"
+          required: true
+      tags: ["Distribution"]
+  /session:
+    post:
+      summary: "Initialize interactive session"
+      description: |
+        Start a new interactive session with a server. Session allows server to
+        call back to the client for advanced capabilities.
+
+        > **Deprecated**: This endpoint is deprecated and will be removed in a future version.
+        > Server should support gRPC directly on the listening socket.
+
+        ### Hijacking
+
+        This endpoint hijacks the HTTP connection to HTTP2 transport that allows
+        the client to expose gRPC services on that connection.
+
+        For example, the client sends this request to upgrade the connection:
+
+        ```
+        POST /session HTTP/1.1
+        Upgrade: h2c
+        Connection: Upgrade
+        ```
+
+        The Docker daemon responds with a `101 UPGRADED` response followed by
+        the raw stream:
+
+        ```
+        HTTP/1.1 101 UPGRADED
+        Connection: Upgrade
+        Upgrade: h2c
+        ```
+      operationId: "Session"
+      produces:
+        - "application/vnd.docker.raw-stream"
+      responses:
+        101:
+          description: "no error, hijacking successful"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      tags: ["Session"]
diff --git a/_vendor/github.com/moby/moby/api/docs/v1.54.yaml b/_vendor/github.com/moby/moby/api/docs/v1.54.yaml
new file mode 100644
index 00000000000..1d2548e4590
--- /dev/null
+++ b/_vendor/github.com/moby/moby/api/docs/v1.54.yaml
@@ -0,0 +1,13885 @@
+# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
+#
+# This is used for generating API documentation and the types used by the
+# client/server. See api/README.md for more information.
+#
+# Some style notes:
+# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
+#   descriptions.
+# - There is no maximum line length, for ease of editing and pretty diffs.
+# - operationIds are in the format "NounVerb", with a singular noun.
+
+swagger: "2.0"
+schemes:
+  - "http"
+  - "https"
+produces:
+  - "application/json"
+  - "text/plain"
+consumes:
+  - "application/json"
+  - "text/plain"
+basePath: "/v1.54"
+info:
+  title: "Docker Engine API"
+  version: "1.54"
+  x-logo:
+    url: "https://docs.docker.com/assets/images/logo-docker-main.png"
+  description: |
+    The Engine API is an HTTP API served by Docker Engine. It is the API the
+    Docker client uses to communicate with the Engine, so everything the Docker
+    client can do can be done with the API.
+
+    Most of the client's commands map directly to API endpoints (e.g. `docker ps`
+    is `GET /containers/json`). The notable exception is running containers,
+    which consists of several API calls.
+
+    # Errors
+
+    The API uses standard HTTP status codes to indicate the success or failure
+    of the API call. The body of the response will be JSON in the following
+    format:
+
+    ```
+    {
+      "message": "page not found"
+    }
+    ```
+
+    # Versioning
+
+    The API is usually changed in each release, so API calls are versioned to
+    ensure that clients don't break. To lock to a specific version of the API,
+    you prefix the URL with its version, for example, call `/v1.30/info` to use
+    the v1.30 version of the `/info` endpoint. If the API version specified in
+    the URL is not supported by the daemon, an HTTP `400 Bad Request` error message
+    is returned.
+
+    If you omit the version-prefix, the current version of the API (v1.54) is used.
+    For example, calling `/info` is the same as calling `/v1.54/info`. Using the
+    API without a version-prefix is deprecated and will be removed in a future release.
+
+    Engine releases in the near future should support this version of the API,
+    so your client will continue to work even if it is talking to a newer Engine.
+
+    The API uses an open schema model, which means the server may add extra properties
+    to responses. Likewise, the server will ignore any extra query parameters and
+    request body properties. When you write clients, you need to ignore additional
+    properties in responses to ensure they do not break when talking to newer
+    daemons.
+
+
+    # Authentication
+
+    Authentication for registries is handled client side. The client has to send
+    authentication details to various endpoints that need to communicate with
+    registries, such as `POST /images/(name)/push`. These are sent as
+    `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5)
+    (JSON) string with the following structure:
+
+    ```
+    {
+      "username": "string",
+      "password": "string",
+      "serveraddress": "string"
+    }
+    ```
+
+    The `serveraddress` is a domain/IP without a protocol. Throughout this
+    structure, double quotes are required.
+
+    If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth),
+    you can just pass this instead of credentials:
+
+    ```
+    {
+      "identitytoken": "9cbaf023786cd7..."
+    }
+    ```
+
+# The tags on paths define the menu sections in the ReDoc documentation, so
+# the usage of tags must make sense for that:
+# - They should be singular, not plural.
+# - There should not be too many tags, or the menu becomes unwieldy. For
+#   example, it is preferable to add a path to the "System" tag instead of
+#   creating a tag with a single path in it.
+# - The order of tags in this list defines the order in the menu.
+tags:
+  # Primary objects
+  - name: "Container"
+    x-displayName: "Containers"
+    description: |
+      Create and manage containers.
+ - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + ImageHistoryResponseItem: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + PortSummary: + type: "object" + description: | + Describes a port-mapping between the container and the host. + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + x-go-type: + type: Addr + import: + package: net/netip + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountType: + description: |- + The mount type. Available types: + + - `bind` a mount of a file or directory from the host into the container. 
+ - `cluster` a Swarm cluster volume. + - `image` an OCI image. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + type: "string" + enum: + - "bind" + - "cluster" + - "image" + - "npipe" + - "tmpfs" + - "volume" + example: "volume" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `cluster` a Swarm cluster volume. + - `image` an OCI image. + - `npipe` a named pipe from the host into the container. + - `tmpfs` a `tmpfs`. + - `volume` a docker volume with the given `Name`. + allOf: + - $ref: "#/definitions/MountType" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. + type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. + type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. + type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). + type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + description: | + The name of the device driver to use for this request. + + Note that if this is specified the capabilities are ignored when + selecting a device driver. + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + + Note that if a driver is specified the capabilities have no effect on + selecting a driver as the driver name is used directly. 
+ + Note that if no driver is specified the capabilities are used to + select a driver with the required capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: |- + Mount source (e.g. a volume name, a host path). The source cannot be + specified when using `Type=tmpfs`. For `Type=bind`, the source path + must either exist, or the `CreateMountpoint` must be set to `true` to + create the source path on the host if missing. + + For `Type=npipe`, the pipe must exist prior to creating the container. + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. + - `cluster` a Swarm cluster volume + - `image` Mounts an image. + - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. + - `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + allOf: + - $ref: "#/definitions/MountType" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to `true` in conjunction). + + Added in v1.44, before that version all read-only mounts were + non-recursive by default. To match the previous behaviour this + will default to `true` for clients on versions prior to v1.44. + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + Subpath: + description: "Source path inside the volume. Must be relative without any back traversals." + type: "string" + example: "dir-inside-volume/subdirectory" + ImageOptions: + description: "Optional configuration for the `image` type." + type: "object" + properties: + Subpath: + description: "Source path inside the image. Must be relative without any back traversals." + type: "string" + example: "dir-inside-image/subdirectory" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: | + The permission mode for the tmpfs mount in an integer. + The value must not be in octal format (e.g. 755) but rather + the decimal representation of the octal value (e.g. 493). + type: "integer" + Options: + description: | + The options to be passed to the tmpfs mount. An array of arrays. + Flag options should be provided as 1-length arrays. Other types + should be provided as as 2-length arrays, where the first item is + the key and the second the value. + type: "array" + items: + type: "array" + minItems: 1 + maxItems: 2 + items: + type: "string" + example: + [["noexec"]] + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." 
+ type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. + type: "integer" + format: "int64" + MemorySwappiness: + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCpus: + description: "CPU quota in units of 10<sup>-9</sup> CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. 
+ type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. + type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: | + A test to perform to check that the container is healthy. + Healthcheck commands should be side-effect free. + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + + A non-zero exit code indicates a failed healthcheck: + - `0` healthy + - `1` unhealthy + - `2` reserved (treated as unhealthy) + - other values: error running probe + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. 
It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + + If the health check command does not complete within this timeout, + the check is considered failed and the health check process is + forcibly terminated without a graceful shutdown. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. 
+ + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + example: "" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + description: |- + Name of the logging driver used for the container or "none" + if logging is disabled. + type: "string" + enum: + - "local" + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + description: |- + Driver-specific configuration options for the logging driver. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "5" + "max-size": "10m" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:<name|id>`. Any + other value is taken as a custom network's name to which this + container should connect to. + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `<container name>[:<ro|rw>]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. 
+ x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + Annotations: + type: "object" + description: | + Arbitrary non-identifying metadata attached to container and + provided to the runtime when the container is started. + additionalProperties: + type: "string" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + format: "ip-address" + x-go-type: + type: Addr + import: + package: net/netip + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:<name|id>"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:<name|id>"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: |- + Gives the container full access to the host. + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. 
+ + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. + ShmSize: + type: "integer" + format: "int64" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + x-nullable: true + description: |- + A list of kernel parameters (sysctls) to set in the container. + + This field is omitted if not set. + additionalProperties: + type: "string" + example: + "net.ipv4.ip_forward": "1" + Runtime: + type: "string" + x-nullable: true + description: |- + Runtime to use with this container. + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`<user-name|UID>[<:group-name|GID>]`). + type: "string" + example: "123:456" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." 
+ type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + ImageConfig: + description: | + Configuration of the image. These fields are used as defaults + when starting a container from the image. + type: "object" + properties: + User: + description: "The user that commands are run as inside the container." 
+ type: "string" + example: "web:web" + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: + "/app/data": {} + "/app/config": {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. 
+ EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + SandboxKey: + description: SandboxKey is the full path of the netns handle + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + Ports: + $ref: "#/definitions/PortMap" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `<port>/<protocol>`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + x-nullable: true + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + x-go-type: + type: Addr + import: + package: net/netip + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" + + DriverData: + description: | + Information about the storage driver used to store the container's and + image's filesystem. + type: "object" + required: [Name, Data] + properties: + Name: + description: "Name of the storage driver." + type: "string" + x-nullable: false + example: "overlay2" + Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific, and depends on the storage-driver + in use, and should be used for informational purposes only. + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } + + Storage: + description: | + Information about the storage used by the container. + type: "object" + properties: + RootFS: + description: | + Information about the storage used for the container's root filesystem. 
+ type: "object" + x-nullable: true + $ref: "#/definitions/RootFSStorage" + + RootFSStorage: + description: | + Information about the storage used for the container's root filesystem. + type: "object" + x-go-name: RootFSStorage + properties: + Snapshot: + description: | + Information about the snapshot used for the container's root filesystem. + type: "object" + x-nullable: true + $ref: "#/definitions/RootFSStorageSnapshot" + + RootFSStorageSnapshot: + description: | + Information about a snapshot backend of the container's root filesystem. + type: "object" + x-go-name: RootFSStorageSnapshot + properties: + Name: + description: "Name of the snapshotter." + type: "string" + x-nullable: false + + FilesystemChange: + description: | + Change in the container's filesystem. + type: "object" + required: [Path, Kind] + properties: + Path: + description: | + Path to file or directory that has changed. + type: "string" + x-nullable: false + Kind: + $ref: "#/definitions/ChangeType" + + ChangeType: + description: | + Kind of change + + Can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + + ImageInspect: + description: | + Information about an image in the local image cache. + type: "object" + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + Identity: + description: |- + Identity holds information about the identity and origin of the image. + This is trusted information verified by the daemon and cannot be modified + by tagging an image to a different name. + x-nullable: true + $ref: "#/definitions/Identity" + Manifests: + description: | + Manifests is a list of image manifests available in this image. It + provides a more detailed view of the platform-specific image manifests or + other image-attached data like build attestations. + + Only available if the daemon provides a multi-platform image store + and the `manifests` option is set in the inspect request. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: true + items: + $ref: "#/definitions/ImageManifestSummary" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. 
+ type: "array" + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Comment: + description: | + Optional message that was set when committing or importing the image. + type: "string" + x-nullable: true + example: "" + Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if present in the image, + and omitted otherwise. + type: "string" + format: "dateTime" + x-nullable: true + example: "2022-02-04T21:20:12.497794809Z" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: true + example: "" + Config: + $ref: "#/definitions/ImageConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. + type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + GraphDriver: + x-nullable: true + $ref: "#/definitions/DriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + + Identity: + description: |- + Identity holds information about the identity and origin of the image. 
+ This is trusted information verified by the daemon and cannot be modified + by tagging an image to a different name. + type: "object" + properties: + Signature: + description: |- + Signature contains the properties of verified signatures for the image. + type: "array" + items: + $ref: "#/definitions/SignatureIdentity" + Pull: + description: |- + Pull contains remote location information if image was created via pull. + If image was pulled via mirror, this contains the original repository location. + After successful push this images also contains the pushed repository location. + type: "array" + items: + $ref: "#/definitions/PullIdentity" + Build: + description: |- + Build contains build reference information if image was created via build. + type: "array" + items: + $ref: "#/definitions/BuildIdentity" + + BuildIdentity: + description: |- + BuildIdentity contains build reference information if image was created via build. + type: "object" + properties: + Ref: + description: |- + Ref is the identifier for the build request. This reference can be used to + look up the build details in BuildKit history API. + type: "string" + CreatedAt: + description: |- + CreatedAt is the time when the build ran. + type: "string" + format: "date-time" + + PullIdentity: + description: |- + PullIdentity contains remote location information if image was created via pull. + If image was pulled via mirror, this contains the original repository location. + type: "object" + properties: + Repository: + description: |- + Repository is the remote repository location the image was pulled from. + type: "string" + + SignatureIdentity: + description: |- + SignatureIdentity contains the properties of verified signatures for the image. + type: "object" + properties: + Name: + description: |- + Name is a textual description summarizing the type of signature. + type: "string" + Timestamps: + description: |- + Timestamps contains a list of verified signed timestamps for the signature. + type: "array" + items: + $ref: "#/definitions/SignatureTimestamp" + KnownSigner: + description: |- + KnownSigner is an identifier for a special signer identity that is known to the implementation. + $ref: "#/definitions/KnownSignerIdentity" + DockerReference: + description: |- + DockerReference is the Docker image reference associated with the signature. + This is an optional field only present in older hashedrecord signatures. + type: "string" + Signer: + description: |- + Signer contains information about the signer certificate used to sign the image. + $ref: "#/definitions/SignerIdentity" + SignatureType: + description: |- + SignatureType is the type of signature format. E.g. "bundle-v0.3" or "hashedrecord". + $ref: "#/definitions/SignatureType" + Error: + description: |- + Error contains error information if signature verification failed. + Other fields will be empty in this case. + type: "string" + Warnings: + description: |- + Warnings contains any warnings that occurred during signature verification. + For example, if there was no internet connectivity and cached trust roots were used. + Warning does not indicate a failed verification but may point to configuration issues. + type: "array" + items: + type: "string" + + SignatureTimestamp: + description: |- + SignatureTimestamp contains information about a verified signed timestamp for an image signature. 
+ type: "object" + properties: + Type: + $ref: "#/definitions/SignatureTimestampType" + URI: + type: "string" + Timestamp: + type: "string" + format: "date-time" + + SignatureTimestampType: + description: |- + SignatureTimestampType is the type of timestamp used in the signature. + type: "string" + enum: + - "Tlog" + - "TimestampAuthority" + + SignatureType: + description: |- + SignatureType is the type of signature format. + type: "string" + enum: + - "bundle-v0.3" + - "simplesigning-v1" + + KnownSignerIdentity: + description: |- + KnownSignerIdentity is an identifier for a special signer identity that is known to the implementation. + type: "string" + enum: + - "DHI" + + SignerIdentity: + description: |- + SignerIdentity contains information about the signer certificate used to sign the image. + type: "object" + properties: + CertificateIssuer: + type: "string" + description: |- + CertificateIssuer is the certificate issuer. + SubjectAlternativeName: + type: "string" + description: |- + SubjectAlternativeName is the certificate subject alternative name. + Issuer: + type: "string" + description: |- + The OIDC issuer. Should match `iss` claim of ID token or, in the case of + a federated login like Dex it should match the issuer URL of the + upstream issuer. The issuer is not set the extensions are invalid and + will fail to render. + BuildSignerURI: + type: "string" + description: |- + Reference to specific build instructions that are responsible for signing. + BuildSignerDigest: + type: "string" + description: |- + Immutable reference to the specific version of the build instructions that is responsible for signing. + RunnerEnvironment: + type: "string" + description: |- + Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure. + SourceRepositoryURI: + type: "string" + description: |- + Source repository URL that the build was based on. + SourceRepositoryDigest: + type: "string" + description: |- + Immutable reference to a specific version of the source code that the build was based upon. + SourceRepositoryRef: + type: "string" + description: |- + Source Repository Ref that the build run was based upon. + SourceRepositoryIdentifier: + type: "string" + description: |- + Immutable identifier for the source repository the workflow was based upon. + SourceRepositoryOwnerURI: + type: "string" + description: |- + Source repository owner URL of the owner of the source repository that the build was based on. + SourceRepositoryOwnerIdentifier: + type: "string" + description: |- + Immutable identifier for the owner of the source repository that the workflow was based upon. + BuildConfigURI: + type: "string" + description: |- + Build Config URL to the top-level/initiating build instructions. + BuildConfigDigest: + type: "string" + description: |- + Immutable reference to the specific version of the top-level/initiating build instructions. + BuildTrigger: + type: "string" + description: |- + Event or action that initiated the build. + RunInvocationURI: + type: "string" + description: |- + Run Invocation URL to uniquely identify the build execution. + SourceRepositoryVisibilityAtSigning: + type: "string" + description: |- + Source repository visibility at the time of signing the certificate. 
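+
+  # Editorial note (illustrative only, not part of the upstream definition):
+  # the Identity / SignatureIdentity / SignerIdentity / SignatureTimestamp
+  # definitions above describe verified signature data as it might appear in
+  # an image inspect response. A hypothetical entry, using only field names
+  # and enum values defined in this file (all concrete values below are made
+  # up for illustration), could look like:
+  #
+  #   "Identity": {
+  #     "Signature": [
+  #       {
+  #         "Name": "verified Sigstore bundle",
+  #         "SignatureType": "bundle-v0.3",
+  #         "KnownSigner": "DHI",
+  #         "Signer": {
+  #           "Issuer": "https://accounts.example.com",
+  #           "SubjectAlternativeName": "https://builder.example.com/workflows/release"
+  #         },
+  #         "Timestamps": [
+  #           { "Type": "Tlog", "Timestamp": "2024-05-01T12:00:00Z" }
+  #         ]
+  #       }
+  #     ]
+  #   }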
+ + ImageSummary: + type: "object" + x-go-name: "Summary" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Created: + description: | + Date and time at which the image was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + x-nullable: false + example: "1644009612" + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 172064416 + SharedSize: + description: | + Total size of image layers that are shared between this image and other + images. + + This size is not calculated by default. `-1` indicates that the value + has not been set / calculated. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + Labels: + description: "User-defined key/value metadata." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Containers: + description: | + Number of containers using this image. Includes both stopped and running + containers. + + `-1` indicates that the value has not been set / calculated. + x-nullable: false + type: "integer" + example: 2 + Manifests: + description: | + Manifests is a list of manifests available in this image. + It provides a more detailed view of the platform-specific image manifests + or other image-attached data like build attestations. 
+ + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: false + x-omitempty: true + items: + $ref: "#/definitions/ImageManifestSummary" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + + ImagesDiskUsage: + type: "object" + x-go-name: "DiskUsage" + x-go-package: "github.com/moby/moby/api/types/image" + description: | + represents system data usage for image resources. + properties: + ActiveCount: + description: | + Count of active images. + type: "integer" + format: "int64" + example: 1 + TotalCount: + description: | + Count of all images. + type: "integer" + format: "int64" + example: 4 + Reclaimable: + description: | + Disk space that can be reclaimed by removing unused images. + type: "integer" + format: "int64" + example: 12345678 + TotalSize: + description: | + Disk space in use by images. + type: "integer" + format: "int64" + example: 98765432 + Items: + description: | + List of image summaries. + type: "array" + x-omitempty: true + items: + x-go-type: + type: Summary + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + AuthResponse: + description: | + An identity token was generated successfully. + type: "object" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + example: "Login Succeeded" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + example: "9cbaf023786cd7..." + x-nullable: false + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + x-nullable: false + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + example: "tardis" + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + example: "custom" + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + example: "/var/lib/docker/volumes/tardis" + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." 
+ x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumesDiskUsage: + type: "object" + x-go-name: "DiskUsage" + x-go-package: "github.com/moby/moby/api/types/volume" + description: | + represents system data usage for volume resources. + properties: + ActiveCount: + description: | + Count of active volumes. + type: "integer" + format: "int64" + example: 1 + TotalCount: + description: | + Count of all volumes. + type: "integer" + format: "int64" + example: 4 + Reclaimable: + description: | + Disk space that can be reclaimed by removing inactive volumes. + type: "integer" + format: "int64" + example: 12345678 + TotalSize: + description: | + Disk space in use by volumes. + type: "integer" + format: "int64" + example: 98765432 + Items: + description: | + List of volumes. + type: "array" + x-omitempty: true + items: + x-go-type: + type: Volume + + VolumeCreateRequest: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateRequest" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + example: [] + + Network: + type: "object" + properties: + Name: + description: | + Name of the network. + type: "string" + example: "my_network" + x-omitempty: false + Id: + description: | + ID that uniquely identifies a network on a single machine. + type: "string" + x-go-name: "ID" + x-omitempty: false + example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: + description: | + Date and time at which the network was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-omitempty: false + x-go-type: + type: Time + import: + package: time + hints: + nullable: false + example: "2016-10-19T04:33:30.360899459Z" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level) + type: "string" + x-omitempty: false + example: "local" + Driver: + description: | + The name of the driver used to create the network (e.g. `bridge`, + `overlay`). + type: "string" + x-omitempty: false + example: "overlay" + EnableIPv4: + description: | + Whether the network was created with IPv4 enabled. + type: "boolean" + x-omitempty: false + example: true + EnableIPv6: + description: | + Whether the network was created with IPv6 enabled. + type: "boolean" + x-omitempty: false + example: false + IPAM: + description: | + The network's IP Address Management. + $ref: "#/definitions/IPAM" + x-nullable: false + x-omitempty: false + Internal: + description: | + Whether the network is created to only allow internal networking + connectivity. + type: "boolean" + x-nullable: false + x-omitempty: false + default: false + example: false + Attachable: + description: | + Whether a global / swarm scope network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + x-nullable: false + x-omitempty: false + default: false + example: false + Ingress: + description: | + Whether the network is providing the routing-mesh for the swarm cluster. + type: "boolean" + x-nullable: false + x-omitempty: false + default: false + example: false + ConfigFrom: + $ref: "#/definitions/ConfigReference" + x-nullable: false + x-omitempty: false + ConfigOnly: + description: | + Whether the network is a config-only network. Config-only networks are + placeholder networks for network configurations to be used by other + networks. Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + x-omitempty: false + x-nullable: false + default: false + Options: + description: | + Network-specific options uses when creating the network. 
+ type: "object" + x-omitempty: false + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: | + Metadata specific to the network being created. + type: "object" + x-omitempty: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Peers: + description: | + List of peer nodes for an overlay network. This field is only present + for overlay networks, and omitted for other network types. + type: "array" + x-omitempty: true + items: + $ref: "#/definitions/PeerInfo" + + NetworkSummary: + description: "Network list response item" + x-go-name: Summary + type: "object" + allOf: + - $ref: "#/definitions/Network" + + NetworkInspect: + description: 'The body of the "get network" http response message.' + x-go-name: Inspect + type: "object" + allOf: + - $ref: "#/definitions/Network" + properties: + Containers: + description: | + Contains endpoints attached to the network. + type: "object" + x-omitempty: false + additionalProperties: + $ref: "#/definitions/EndpointResource" + example: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Services: + description: | + List of services using the network. This field is only present for + swarm scope networks, and omitted for local scope networks. + type: "object" + x-omitempty: true + additionalProperties: + x-go-type: + type: ServiceInfo + hints: + nullable: false + Status: + description: > + provides runtime information about the network + such as the number of allocated IPs. + $ref: "#/definitions/NetworkStatus" + + NetworkStatus: + description: > + provides runtime information about the network + such as the number of allocated IPs. + type: "object" + x-go-name: Status + properties: + IPAM: + $ref: "#/definitions/IPAMStatus" + + ServiceInfo: + x-nullable: false + x-omitempty: false + description: > + represents service parameters with the list of service's tasks + type: "object" + properties: + VIP: + type: "string" + x-omitempty: false + x-go-type: + type: Addr + import: + package: net/netip + Ports: + type: "array" + x-omitempty: false + items: + type: "string" + LocalLBIndex: + type: "integer" + format: "int" + x-omitempty: false + x-go-type: + type: int + Tasks: + type: "array" + x-omitempty: false + items: + $ref: "#/definitions/NetworkTaskInfo" + + NetworkTaskInfo: + x-nullable: false + x-omitempty: false + x-go-name: Task + description: > + carries the information about one backend task + type: "object" + properties: + Name: + type: "string" + x-omitempty: false + EndpointID: + type: "string" + x-omitempty: false + EndpointIP: + type: "string" + x-omitempty: false + x-go-type: + type: Addr + import: + package: net/netip + Info: + type: "object" + x-omitempty: false + additionalProperties: + type: "string" + + ConfigReference: + x-nullable: false + x-omitempty: false + description: | + The config-only network source to provide the configuration for + this network. 
+ type: "object" + properties: + Network: + description: | + The name of the config-only network that provides the network's + configuration. The specified network must be an existing config-only + network. Only network names are allowed, not network IDs. + type: "string" + x-omitempty: false + example: "config_only_network_01" + + IPAM: + type: "object" + x-nullable: false + x-omitempty: false + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + example: "default" + Config: + description: | + List of IPAM configuration options, specified as a map: + + ``` + {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} + ``` + type: "array" + items: + $ref: "#/definitions/IPAMConfig" + Options: + description: "Driver-specific options, specified as a map." + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + example: "172.20.0.0/16" + IPRange: + type: "string" + example: "172.20.10.0/24" + Gateway: + type: "string" + example: "172.20.10.11" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + + IPAMStatus: + type: "object" + x-nullable: false + x-omitempty: false + properties: + Subnets: + type: "object" + additionalProperties: + $ref: "#/definitions/SubnetStatus" + example: + "172.16.0.0/16": + IPsInUse: 3 + DynamicIPsAvailable: 65533 + "2001:db8:abcd:0012::0/96": + IPsInUse: 5 + DynamicIPsAvailable: 4294967291 + x-go-type: + type: SubnetStatuses + kind: map + + SubnetStatus: + type: "object" + x-nullable: false + x-omitempty: false + properties: + IPsInUse: + description: > + Number of IP addresses in the subnet that are in use or reserved and + are therefore unavailable for allocation, saturating at 2<sup>64</sup> - 1. + type: integer + format: uint64 + x-omitempty: false + DynamicIPsAvailable: + description: > + Number of IP addresses within the network's IPRange for the subnet + that are available for allocation, saturating at 2<sup>64</sup> - 1. + type: integer + format: uint64 + x-omitempty: false + + EndpointResource: + type: "object" + description: > + contains network resources allocated and used for a + container in a network. + properties: + Name: + type: "string" + x-omitempty: false + example: "container_1" + EndpointID: + type: "string" + x-omitempty: false + example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: + type: "string" + x-omitempty: false + example: "02:42:ac:13:00:02" + x-go-type: + type: HardwareAddr + IPv4Address: + type: "string" + x-omitempty: false + example: "172.19.0.2/16" + x-go-type: + type: Prefix + import: + package: net/netip + IPv6Address: + type: "string" + x-omitempty: false + example: "" + x-go-type: + type: Prefix + import: + package: net/netip + + PeerInfo: + description: > + represents one peer of an overlay network. + type: "object" + x-nullable: false + properties: + Name: + description: + ID of the peer-node in the Swarm cluster. + type: "string" + x-omitempty: false + example: "6869d7c1732b" + IP: + description: + IP-address of the peer-node in the Swarm cluster. 
+ type: "string" + x-omitempty: false + example: "10.133.77.91" + x-go-type: + type: Addr + import: + package: net/netip + + NetworkCreateResponse: + description: "OK response to NetworkCreate operation" + type: "object" + title: "NetworkCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warning] + properties: + Id: + description: "The ID of the created network." + type: "string" + x-nullable: false + example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" + Warning: + description: "Warnings encountered when creating the container" + type: "string" + x-nullable: false + example: "" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + description: | + BuildCache contains information about a build cache record. + properties: + ID: + type: "string" + description: | + Unique ID of the build cache record. + example: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] + Type: + type: "string" + description: | + Cache record type. + example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + BuildCacheDiskUsage: + type: "object" + x-go-name: "DiskUsage" + x-go-package: "github.com/moby/moby/api/types/build" + description: | + represents system data usage for build cache resources. + properties: + ActiveCount: + description: | + Count of active build cache records. + type: "integer" + format: "int64" + example: 1 + TotalCount: + description: | + Count of all build cache records. + type: "integer" + format: "int64" + example: 4 + Reclaimable: + description: | + Disk space that can be reclaimed by removing inactive build cache records. + type: "integer" + format: "int64" + example: 12345678 + TotalSize: + description: | + Disk space in use by build cache records. + type: "integer" + format: "int64" + example: 98765432 + Items: + description: | + List of build cache records. 
+ type: "array" + x-omitempty: true + items: + x-go-type: + type: CacheRecord + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + DeviceInfo: + type: "object" + description: | + DeviceInfo represents a device that can be used by a container. + properties: + Source: + type: "string" + example: "cdi" + description: | + The origin device driver. + ID: + type: "string" + example: "vendor.com/gpu=0" + description: | + The unique identifier for the device within its source driver. + For CDI devices, this would be an FQDN like "vendor.com/gpu=0". + + NRIInfo: + description: | + Information about the Node Resource Interface (NRI). + + This field is only present if NRI is enabled. + type: "object" + x-nullable: true + properties: + Info: + description: | + Information about NRI, provided as "label" / "value" pairs. + + <p><br /></p> + + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["plugin-path", "/opt/docker/nri/plugins"] + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IDResponse: + description: "Response to an API call that returns just an Id" + type: "object" + x-go-name: "IDResponse" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + NetworkConnectRequest: + description: | + NetworkConnectRequest represents the data to be used to connect a container to a network. + type: "object" + x-go-name: "ConnectRequest" + required: ["Container"] + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + x-nullable: false + example: "3613f73ba0e4" + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + x-nullable: true + + NetworkDisconnectRequest: + description: | + NetworkDisconnectRequest represents the data to be used to disconnect a container from a network. + type: "object" + x-go-name: "DisconnectRequest" + required: ["Container"] + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + x-nullable: false + example: "3613f73ba0e4" + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + default: false + x-nullable: false + x-omitempty: false + example: false + + EndpointSettings: + description: "Configuration for a network endpoint." 
+ type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + MacAddress: + description: | + MAC address for the endpoint on this network. The network driver might ignore this parameter. + type: "string" + example: "02:42:ac:11:00:04" + x-go-type: + type: HardwareAddr + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + GwPriority: + description: | + This property determines which endpoint will provide the default + gateway for a container. The endpoint with the highest priority will + be used. If multiple endpoints have the same priority, endpoints are + lexicographically sorted based on their network name, and the one + that sorts first is picked. + type: "integer" + format: "int64" + example: + - 10 + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + x-go-type: + type: Addr + import: + package: net/netip + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + x-go-type: + type: Addr + import: + package: net/netip + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + x-go-type: + type: Addr + import: + package: net/netip + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + DNSNames: + description: | + List of all DNS names an endpoint has on a specific network. This + list is based on the container name, network aliases, container short + ID, and hostname. + + These DNS names are non-fully qualified but can contain several dots. + You can get fully qualified DNS names by appending `.<network-name>`. + For instance, if container name is `my.ctr` and the network is named + `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be + `my.ctr.testnet`. + type: array + items: + type: string + example: ["foobar", "server_x", "server_y", "my.ctr"] + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. 
+ type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + x-go-type: + type: Addr + import: + package: net/netip + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + x-go-type: + type: Addr + import: + package: net/netip + LinkLocalIPs: + type: "array" + items: + type: "string" + x-go-type: + type: Addr + import: + package: net/netip + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-go-name: "Mount" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + x-go-name: "Device" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-go-name: "Env" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. + type: "object" + x-go-name: "Privilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + + Plugin: + description: "A plugin for the Engine API" + type: "object" + x-go-name: "Plugin" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "user-configurable settings for the plugin." + type: "object" + x-go-name: "Settings" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-go-name: "PluginReference" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." 
+ type: "object" + x-go-name: "Config" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + x-go-name: "Interface" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + type: "string" + x-go-type: + type: "CapabilityID" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." + enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-go-name: "User" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-go-name: "NetworkConfig" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-go-name: "LinuxConfig" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-go-name: "Args" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + x-go-name: "RootFS" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. 
+ + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." 
+ type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. + type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." 
+ type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. + type: "string" + SigningCAKey: + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. 
+ type: "string" + ForceRotate: + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey` + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: | + If set, generate a key and use it to lock data stored on the + managers. + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selected log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. 
+ type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + + <p><br /></p> + + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + + <p><br /></p> + + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + + <p><br /></p> + + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. 
+ + <p><br /></p> + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + + <p><br /></p> + + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." 
+                      type: "string"
+                    GID:
+                      description: "GID represents the file GID."
+                      type: "string"
+                    Mode:
+                      description: "Mode represents the FileMode of the file."
+                      type: "integer"
+                      format: "uint32"
+                SecretID:
+                  description: |
+                    SecretID represents the ID of the specific secret that we're
+                    referencing.
+                  type: "string"
+                SecretName:
+                  description: |
+                    SecretName is the name of the secret that this references,
+                    but this is just provided for lookup/display purposes. The
+                    secret in the reference will be identified by its ID.
+                  type: "string"
+          OomScoreAdj:
+            type: "integer"
+            format: "int64"
+            description: |
+              An integer value containing the score given to the container in
+              order to tune OOM killer preferences.
+            example: 0
+          Configs:
+            description: |
+              Configs contains references to zero or more configs that will be
+              exposed to the service.
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                File:
+                  description: |
+                    File represents a specific target that is backed by a file.
+
+                    <p><br /></p>
+
+                    > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive
+                  type: "object"
+                  properties:
+                    Name:
+                      description: |
+                        Name represents the final filename in the filesystem.
+                      type: "string"
+                    UID:
+                      description: "UID represents the file UID."
+                      type: "string"
+                    GID:
+                      description: "GID represents the file GID."
+                      type: "string"
+                    Mode:
+                      description: "Mode represents the FileMode of the file."
+                      type: "integer"
+                      format: "uint32"
+                Runtime:
+                  description: |
+                    Runtime represents a target that is not mounted into the
+                    container but is used by the task.
+
+                    <p><br /></p>
+
+                    > **Note**: `Configs.File` and `Configs.Runtime` are mutually
+                    > exclusive
+                  type: "object"
+                ConfigID:
+                  description: |
+                    ConfigID represents the ID of the specific config that we're
+                    referencing.
+                  type: "string"
+                ConfigName:
+                  description: |
+                    ConfigName is the name of the config that this references,
+                    but this is just provided for lookup/display purposes. The
+                    config in the reference will be identified by its ID.
+                  type: "string"
+          Isolation:
+            type: "string"
+            description: |
+              Isolation technology of the containers running the service.
+              (Windows only)
+            enum:
+              - "default"
+              - "process"
+              - "hyperv"
+              - ""
+          Init:
+            description: |
+              Run an init inside the container that forwards signals and reaps
+              processes. This field is omitted if empty, and the default (as
+              configured on the daemon) is used.
+            type: "boolean"
+            x-nullable: true
+          Sysctls:
+            description: |
+              Set kernel namespaced parameters (sysctls) in the container.
+              The Sysctls option on services accepts the same sysctls as
+              are supported on containers. Note that while the same sysctls are
+              supported, no guarantees or checks are made about their
+              suitability for a clustered environment, and it's up to the user
+              to determine whether a given sysctl will work properly in a
+              Service.
+            type: "object"
+            additionalProperties:
+              type: "string"
+          # This option is not used by Windows containers
+          CapabilityAdd:
+            type: "array"
+            description: |
+              A list of kernel capabilities to add to the default set
+              for the container.
+            items:
+              type: "string"
+            example:
+              - "CAP_NET_RAW"
+              - "CAP_SYS_ADMIN"
+              - "CAP_SYS_CHROOT"
+              - "CAP_SYSLOG"
+          CapabilityDrop:
+            type: "array"
+            description: |
+              A list of kernel capabilities to drop from the default set
+              for the container.
+            items:
+              type: "string"
+            example:
+              - "CAP_NET_RAW"
+          Ulimits:
+            description: |
+              A list of resource limits to set in the container.
For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                Name:
+                  description: "Name of ulimit"
+                  type: "string"
+                Soft:
+                  description: "Soft limit"
+                  type: "integer"
+                Hard:
+                  description: "Hard limit"
+                  type: "integer"
+      NetworkAttachmentSpec:
+        description: |
+          Read-only spec type for non-swarm containers attached to swarm overlay
+          networks.
+
+          <p><br /></p>
+
+          > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+          > mutually exclusive. PluginSpec is only used when the Runtime field
+          > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+          > field is set to `attachment`.
+        type: "object"
+        properties:
+          ContainerID:
+            description: "ID of the container represented by this task"
+            type: "string"
+      Resources:
+        description: |
+          Resource requirements which apply to each individual container created
+          as part of the service.
+        type: "object"
+        properties:
+          Limits:
+            description: "Define resource limits."
+            $ref: "#/definitions/Limit"
+          Reservations:
+            description: "Define resource reservations."
+            $ref: "#/definitions/ResourceObject"
+          SwapBytes:
+            description: |
+              Amount of swap in bytes - can only be used together with a memory limit.
+              If not specified, the default behaviour is to grant a swap space twice
+              as big as the memory limit.
+              Set to -1 to enable unlimited swap.
+            type: "integer"
+            format: "int64"
+            minimum: -1
+            x-nullable: true
+            x-omitempty: true
+          MemorySwappiness:
+            description: |
+              Tune the service's containers' memory swappiness (0 to 100).
+              If not specified, defaults to the containers' OS' default, generally 60,
+              or whatever value was predefined in the image.
+              Set to -1 to unset a previously set value.
+            type: "integer"
+            format: "int64"
+            minimum: -1
+            maximum: 100
+            x-nullable: true
+            x-omitempty: true
+      RestartPolicy:
+        description: |
+          Specification for the restart policy which applies to containers
+          created as part of this service.
+        type: "object"
+        properties:
+          Condition:
+            description: "Condition for restart."
+            type: "string"
+            enum:
+              - "none"
+              - "on-failure"
+              - "any"
+          Delay:
+            description: "Delay between restart attempts."
+            type: "integer"
+            format: "int64"
+          MaxAttempts:
+            description: |
+              Maximum attempts to restart a given container before giving up
+              (default value is 0, which is ignored).
+            type: "integer"
+            format: "int64"
+            default: 0
+          Window:
+            description: |
+              Window is the time window used to evaluate the restart policy
+              (default value is 0, which is unbounded).
+            type: "integer"
+            format: "int64"
+            default: 0
+      Placement:
+        type: "object"
+        properties:
+          Constraints:
+            description: |
+              An array of constraint expressions to limit the set of nodes where
+              a task can be scheduled. Constraint expressions can either use a
+              _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
+              nodes that satisfy every expression (AND match).
Constraints can + match node or Docker Engine labels as follows: + + node attribute | matches | example + ---------------------|--------------------------------|----------------------------------------------- + `node.id` | Node ID | `node.id==2ivku8v2gvtg4` + `node.hostname` | Node hostname | `node.hostname!=node-2` + `node.role` | Node role (`manager`/`worker`) | `node.role==manager` + `node.platform.os` | Node operating system | `node.platform.os==windows` + `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` + `node.labels` | User-defined node labels | `node.labels.security==high` + `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04` + + `engine.labels` apply to Docker Engine labels like operating system, + drivers, etc. Swarm administrators add `node.labels` for operational + purposes by using the [`node update endpoint`](#operation/NodeUpdate). + + type: "array" + items: + type: "string" + example: + - "node.hostname!=node3.corp.example.com" + - "node.role!=manager" + - "node.labels.type==production" + - "node.platform.os==linux" + - "node.platform.arch==x86_64" + Preferences: + description: | + Preferences provide a way to make the scheduler aware of factors + such as topology. They are provided in order from highest to + lowest precedence. + type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: | + label descriptor, such as `engine.labels.az`. + type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: | + Maximum number of replicas for per node (default value is 0, which + is unlimited) + type: "integer" + format: "int64" + default: 0 + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: | + A counter that triggers an update even if no relevant parameters have + been changed. + type: "integer" + format: "uint64" + Runtime: + description: | + Runtime is the type of runtime specified for the task executor. + type: "string" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + LogDriver: + description: | + Specifies the log driver to use for tasks created from this spec. If + not present, the default one for the swarm will be used, finally + falling back to the engine default if not specified. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + ContainerStatus: + type: "object" + description: "represents the status of a container." 
+ properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + $ref: "#/definitions/ContainerStatus" + PortStatus: + $ref: "#/definitions/PortStatus" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + $ref: "#/definitions/TaskStatus" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service. + $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during an update before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling out an updated task. Either + the old task is shut down before the new task is started, or the + new task is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be rolled back in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: | + Amount of time between rollback iterations, in nanoseconds. + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an rolled back task fails to run, or stops + running during the rollback. + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: | + Amount of time to monitor each rolled back task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during a rollback before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling back a task. Either the old + task is shut down before the new task is started, or the new task + is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: | + Specifies which networks the service should attach to. + + Deprecated: This field is deprecated since v1.44. 
The Networks field in TaskSpec should be used instead. + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + - "sctp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + PublishMode: + description: | + The mode in which port is published. + + <p><br /></p> + + - "ingress" makes the target port accessible on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publish the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: | + The mode of resolution to use for internal load balancing between tasks. + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + ServiceStatus: + description: | + The status of the service's tasks. Provided only when requested as + part of a ServiceList operation. + type: "object" + properties: + RunningTasks: + description: | + The number of tasks for the service currently in the Running state. + type: "integer" + format: "uint64" + example: 7 + DesiredTasks: + description: | + The number of tasks for the service desired to be running. + For replicated services, this is the replica count from the + service spec. For global services, this is computed by taking + count of all tasks for the service with a Desired State other + than Shutdown. + type: "integer" + format: "uint64" + example: 10 + CompletedTasks: + description: | + The number of tasks for a job that are in the Completed state. + This field must be cross-referenced with the service type, as the + value of 0 may mean the service is not in a job mode, or it may + mean the job-mode service has no tasks yet Completed. + type: "integer" + format: "uint64" + JobStatus: + description: | + The status of the service when it is in one of ReplicatedJob or + GlobalJob modes. Absent on Replicated and Global mode services. 
The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job. Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. + type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + x-go-name: "DeleteResponse" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceCreateResponse: + type: "object" + description: | + contains the information returned to a client on the + creation of a new service. + properties: + ID: + description: "The ID of the created service." + type: "string" + x-nullable: false + example: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warnings: + description: | + Optional warning message. + + FIXME(thaJeztah): this should have "omitempty" in the generated type. + type: "array" + x-nullable: true + items: + type: "string" + example: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warnings: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerInspectResponse: + type: "object" + title: "ContainerInspectResponse" + x-go-name: "InspectResponse" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). 
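+  # Illustrative usage note (not part of the upstream schema):
+  # ContainerInspectResponse is the response body of the container inspect
+  # endpoint, e.g.
+  #
+  #   GET /containers/aa86eacfb3b3/json
+  #
+  # where the container is typically referenced by full ID, a unique ID
+  # prefix, or name.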
+ type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Created: + description: |- + Date and time at which the container was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2025-02-17T17:43:39.64001363Z" + Path: + description: |- + The path to the command being run + type: "string" + example: "/bin/sh" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + example: + - "-c" + - "exit 9" + State: + $ref: "#/definitions/ContainerState" + Image: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ResolvConfPath: + description: |- + Location of the `/etc/resolv.conf` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" + HostnamePath: + description: |- + Location of the `/etc/hostname` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" + HostsPath: + description: |- + Location of the `/etc/hosts` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" + LogPath: + description: |- + Location of the file used to buffer the container's logs. Depending on + the logging-driver used for the container, this field may be omitted. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + x-nullable: true + example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" + Name: + description: |- + The name associated with this container. + + For historic reasons, the name may be prefixed with a forward-slash (`/`). + type: "string" + example: "/funny_chatelet" + RestartCount: + description: |- + Number of times the container was restarted since it was created, + or since daemon was started. + type: "integer" + example: 0 + Driver: + description: |- + The storage-driver used for the container's filesystem (graph-driver + or snapshotter). + type: "string" + example: "overlayfs" + Platform: + description: |- + The platform (operating system) for which the container was created. + + This field was introduced for the experimental "LCOW" (Linux Containers + On Windows) features, which has been removed. In most cases, this field + is equal to the host's operating system (`linux` or `windows`). + type: "string" + example: "linux" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + description: |- + OCI descriptor of the platform-specific manifest of the image + the container was created from. 
+ + Note: Only available if the daemon provides a multi-platform + image store. + MountLabel: + description: |- + SELinux mount label set for the container. + type: "string" + example: "" + ProcessLabel: + description: |- + SELinux process label set for the container. + type: "string" + example: "" + AppArmorProfile: + description: |- + The AppArmor profile set for the container. + type: "string" + example: "" + ExecIDs: + description: |- + IDs of exec instances that are running in the container. + type: "array" + items: + type: "string" + x-nullable: true + example: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/DriverData" + x-nullable: true + Storage: + $ref: "#/definitions/Storage" + x-nullable: true + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Mounts: + description: |- + List of mounts used by the container. + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + + ContainerSummary: + type: "object" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Names: + description: |- + The names associated with this container. Most containers have a single + name, but when using legacy "links", the container can have multiple + names. + + For historic reasons, names are prefixed with a forward-slash (`/`). + type: "array" + items: + type: "string" + example: + - "/funny_chatelet" + Image: + description: |- + The name or ID of the image used to create the container. + + This field shows the image reference as was specified when creating the container, + which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest` + or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`), + short form (e.g., `ubuntu:latest`)), or the ID(-prefix) of the image (e.g., `72297848456d`). + + The content of this field can be updated at runtime if the image used to + create the container is untagged, in which case the field is updated to + contain the the image ID (digest) it was resolved to in its canonical, + non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`). + type: "string" + example: "docker.io/library/ubuntu:latest" + ImageID: + description: |- + The ID (digest) of the image that this container was created from. 
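+  # Illustrative usage note (not part of the upstream schema):
+  # ContainerSummary is the element type returned by the container list
+  # endpoint, e.g.
+  #
+  #   GET /containers/json?all=true&size=true
+  #
+  # where `size=true` is what populates the otherwise-omitted SizeRw and
+  # SizeRootFs fields.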
+ type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + x-nullable: true + description: | + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + + This field is not populated in the `GET /system/df` endpoint. + Command: + description: "Command to run when starting the container" + type: "string" + example: "/bin/bash" + Created: + description: |- + Date and time at which the container was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + format: "int64" + example: "1739811096" + Ports: + description: |- + Port-mappings for the container. + type: "array" + items: + $ref: "#/definitions/PortSummary" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + State: + description: | + The state of this container. + type: "string" + enum: + - "created" + - "running" + - "paused" + - "restarting" + - "exited" + - "removing" + - "dead" + example: "running" + Status: + description: |- + Additional human-readable status of this container (e.g. `Exit 0`) + type: "string" + example: "Up 4 days" + HostConfig: + type: "object" + description: |- + Summary of host-specific runtime information of the container. This + is a reduced set of information in the container's "HostConfig" as + available in the container "inspect" response. + properties: + NetworkMode: + description: |- + Networking mode (`host`, `none`, `container:<id>`) or name of the + primary network the container is using. + + This field is primarily for backward compatibility. The container + can be connected to multiple networks for which information can be + found in the `NetworkSettings.Networks` field, which enumerates + settings per network. + type: "string" + example: "mynetwork" + Annotations: + description: |- + Arbitrary key-value metadata attached to the container. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" + NetworkSettings: + description: |- + Summary of the container's network settings + type: "object" + properties: + Networks: + type: "object" + description: |- + Summary of network-settings for each network the container is + attached to. + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + description: |- + List of mounts used by the container. 
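+  # Illustrative usage note (not part of the upstream schema): the list
+  # endpoint also accepts a JSON-encoded `filters` parameter that can match
+  # the Labels and State fields shown above, e.g. (before URL-encoding):
+  #
+  #   GET /containers/json?filters={"label":["com.example.vendor=Acme"],"status":["running"]}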
+ items: + $ref: "#/definitions/MountPoint" + Health: + type: "object" + description: |- + Summary of health status + + Added in v1.52, before that version all container summary not include Health. + After this attribute introduced, it includes containers with no health checks configured, + or containers that are not running with none + properties: + Status: + type: "string" + description: |- + the health status of the container + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + + ContainersDiskUsage: + type: "object" + x-go-name: "DiskUsage" + x-go-package: "github.com/moby/moby/api/types/container" + description: | + represents system data usage information for container resources. + properties: + ActiveCount: + description: | + Count of active containers. + type: "integer" + format: "int64" + example: 1 + TotalCount: + description: | + Count of all containers. + type: "integer" + format: "int64" + example: 4 + Reclaimable: + description: | + Disk space that can be reclaimed by removing inactive containers. + type: "integer" + format: "int64" + example: 12345678 + TotalSize: + description: | + Disk space in use by containers. + type: "integer" + format: "int64" + example: 98765432 + Items: + description: | + List of container summaries. + type: "array" + x-omitempty: true + items: + x-go-type: + type: Summary + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Data is the data to store as a secret, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. 
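+  # Illustrative usage note (not part of the upstream schema): SecretSpec is
+  # the request body of POST /secrets/create, with Data base64-encoded as
+  # described above, e.g.
+  #
+  #   {"Name": "db-password", "Data": "c3VwZXJzZWNyZXQ="}
+  #
+  # assuming no external secrets driver is configured.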
+ $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Data is the data to store as a config, formatted as a standard base64-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether a process within this container has been killed because it ran + out of memory since the container was last started. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." 
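+  # Illustrative example (not part of the upstream schema): because pausing
+  # does not clear Running, a paused container reports both booleans set, so
+  # prefer Status for state checks:
+  #
+  #   {"Status": "paused", "Running": true, "Paused": true, "ExitCode": 0}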
+ type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerUpdateResponse: + type: "object" + title: "ContainerUpdateResponse" + x-go-name: "UpdateResponse" + description: |- + Response for a successful container-update. + properties: + Warnings: + type: "array" + description: |- + Warnings encountered when updating the container. + items: + type: "string" + example: ["Published ports are discarded when using host network mode"] + + ContainerStatsResponse: + description: | + Statistics sample for a container. + type: "object" + x-go-name: "StatsResponse" + title: "ContainerStatsResponse" + properties: + id: + description: | + ID of the container for which the stats were collected. + type: "string" + x-nullable: true + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + name: + description: | + Name of the container for which the stats were collected. + type: "string" + x-nullable: true + example: "boring_wozniak" + os_type: + description: | + OSType is the OS of the container ("linux" or "windows") to allow + platform-specific handling of stats. + type: "string" + x-nullable: true + example: "linux" + read: + description: | + Date and time at which this sample was collected. + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:22.165243637Z" + cpu_stats: + $ref: "#/definitions/ContainerCPUStats" + memory_stats: + $ref: "#/definitions/ContainerMemoryStats" + networks: + description: | + Network statistics for the container per interface. + + This field is omitted if the container has no networking enabled. + x-nullable: true + additionalProperties: + $ref: "#/definitions/ContainerNetworkStats" + example: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + pids_stats: + $ref: "#/definitions/ContainerPidsStats" + blkio_stats: + $ref: "#/definitions/ContainerBlkioStats" + num_procs: + description: | + The number of processors on the system. + + This field is Windows-specific and always zero for Linux containers. + type: "integer" + format: "uint32" + example: 16 + storage_stats: + $ref: "#/definitions/ContainerStorageStats" + preread: + description: | + Date and time at which this first sample was collected. This field + is not propagated if the "one-shot" option is set. If the "one-shot" + option is set, this field may be omitted, empty, or set to a default + date (`0001-01-01T00:00:00Z`). + + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. 
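+  # Illustrative usage note (not part of the upstream schema): this object is
+  # returned by GET /containers/{id}/stats, which streams one sample per
+  # second by default. `?stream=false` returns a single sample, and
+  # `?stream=false&one-shot=true` skips the priming sample, leaving preread
+  # and precpu_stats unset as described above.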
+ type: "string" + format: "date-time" + example: "2025-01-16T13:55:21.160452595Z" + precpu_stats: + $ref: "#/definitions/ContainerCPUStats" + + ContainerBlkioStats: + description: | + BlkioStats stores all IO service stats for data read and write. + + This type is Linux-specific and holds many fields that are specific to cgroups v1. + On a cgroup v2 host, all fields other than `io_service_bytes_recursive` + are omitted or `null`. + + This type is only populated on Linux and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStats" + x-nullable: true + properties: + io_service_bytes_recursive: + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_serviced_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_queue_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_service_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_wait_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_merged_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + sectors_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + example: + io_service_bytes_recursive: [ + {"major": 254, "minor": 0, "op": "read", "value": 7593984}, + {"major": 254, "minor": 0, "op": "write", "value": 100} + ] + io_serviced_recursive: null + io_queue_recursive: null + io_service_time_recursive: null + io_wait_time_recursive: null + io_merged_recursive: null + io_time_recursive: null + sectors_recursive: null + + ContainerBlkioStatEntry: + description: | + Blkio stats entry. + + This type is Linux-specific and omitted for Windows containers. 
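+  # Illustrative note (not part of the upstream schema): on a cgroup v2 host
+  # only io_service_bytes_recursive is populated, so clients typically sum
+  # the entries whose op is "read" or "write" to derive total bytes read and
+  # written.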
+ type: "object" + x-go-name: "BlkioStatEntry" + x-nullable: true + properties: + major: + type: "integer" + format: "uint64" + example: 254 + minor: + type: "integer" + format: "uint64" + example: 0 + op: + type: "string" + example: "read" + value: + type: "integer" + format: "uint64" + example: 7593984 + + ContainerCPUStats: + description: | + CPU related info of the container + type: "object" + x-go-name: "CPUStats" + x-nullable: true + properties: + cpu_usage: + $ref: "#/definitions/ContainerCPUUsage" + system_cpu_usage: + description: | + System Usage. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + online_cpus: + description: | + Number of online CPUs. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint32" + x-nullable: true + example: 5 + throttling_data: + $ref: "#/definitions/ContainerThrottlingData" + + ContainerCPUUsage: + description: | + All CPU stats aggregated since container inception. + type: "object" + x-go-name: "CPUUsage" + x-nullable: true + properties: + total_usage: + description: | + Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). + type: "integer" + format: "uint64" + example: 29912000 + percpu_usage: + description: | + Total CPU time (in nanoseconds) consumed per core (Linux). + + This field is Linux-specific when using cgroups v1. It is omitted + when using cgroups v2 and Windows containers. + type: "array" + x-nullable: true + items: + type: "integer" + format: "uint64" + example: 29912000 + + usage_in_kernelmode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 21994000 + usage_in_usermode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 7918000 + + ContainerPidsStats: + description: | + PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "PidsStats" + x-nullable: true + properties: + current: + description: | + Current is the number of PIDs in the cgroup. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + limit: + description: | + Limit is the hard limit on the number of pids in the cgroup. + A "Limit" of 0 means that there is no limit. + type: "integer" + format: "uint64" + x-nullable: true + example: "18446744073709551615" + + ContainerThrottlingData: + description: | + CPU throttling stats of the container. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "ThrottlingData" + x-nullable: true + properties: + periods: + description: | + Number of periods with throttling active. + type: "integer" + format: "uint64" + example: 0 + throttled_periods: + description: | + Number of periods when the container hit its throttling limit. + type: "integer" + format: "uint64" + example: 0 + throttled_time: + description: | + Aggregated time (in nanoseconds) the container was throttled for. 
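+  # Illustrative note (not part of the upstream schema): on Linux, clients
+  # commonly derive a CPU percentage from two consecutive samples roughly as:
+  #
+  #   cpu_delta    = cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage
+  #   system_delta = cpu_stats.system_cpu_usage      - precpu_stats.system_cpu_usage
+  #   cpu_percent  = (cpu_delta / system_delta) * online_cpus * 100.0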
+ type: "integer" + format: "uint64" + example: 0 + + ContainerMemoryStats: + description: | + Aggregates all memory stats since container inception on Linux. + Windows returns stats for commit and private working set only. + type: "object" + x-go-name: "MemoryStats" + properties: + usage: + description: | + Current `res_counter` usage for memory. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + max_usage: + description: | + Maximum usage ever recorded. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + stats: + description: | + All the stats exported via memory.stat. + + The fields in this object differ between cgroups v1 and v2. + On cgroups v1, fields such as `cache`, `rss`, `mapped_file` are available. + On cgroups v2, fields such as `file`, `anon`, `inactive_file` are available. + + This field is Linux-specific and omitted for Windows containers. + type: "object" + additionalProperties: + type: "integer" + format: "uint64" + x-nullable: true + example: + { + "active_anon": 1572864, + "active_file": 5115904, + "anon": 1572864, + "anon_thp": 0, + "file": 7626752, + "file_dirty": 0, + "file_mapped": 2723840, + "file_writeback": 0, + "inactive_anon": 0, + "inactive_file": 2510848, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 2042, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 45, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 1180928, + "slab_reclaimable": 725576, + "slab_unreclaimable": 455352, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 1, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + } + failcnt: + description: | + Number of times memory usage hits limits. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + limit: + description: | + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 8217579520 + commitbytes: + description: | + Committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + commitpeakbytes: + description: | + Peak committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + privateworkingset: + description: | + Private working set. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + + ContainerNetworkStats: + description: | + Aggregates the network stats of one container + type: "object" + x-go-name: "NetworkStats" + x-nullable: true + properties: + rx_bytes: + description: | + Bytes received. Windows and Linux. + type: "integer" + format: "uint64" + example: 5338 + rx_packets: + description: | + Packets received. Windows and Linux. + type: "integer" + format: "uint64" + example: 36 + rx_errors: + description: | + Received errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. 
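+  # Illustrative note (not part of the upstream schema): for the
+  # ContainerMemoryStats above, clients often approximate "used" memory by
+  # subtracting the page cache, e.g. usage - stats.inactive_file on cgroups
+  # v2 (usage - stats.total_inactive_file on cgroups v1), reported as a
+  # percentage of limit.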
+ type: "integer" + format: "uint64" + example: 0 + rx_dropped: + description: | + Incoming packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + tx_bytes: + description: | + Bytes sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 1200 + tx_packets: + description: | + Packets sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 12 + tx_errors: + description: | + Sent errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + tx_dropped: + description: | + Outgoing packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + endpoint_id: + description: | + Endpoint ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + instance_id: + description: | + Instance ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + + ContainerStorageStats: + description: | + StorageStats is the disk I/O stats for read/write on Windows. + + This type is Windows-specific and omitted for Linux containers. + type: "object" + x-go-name: "StorageStats" + x-nullable: true + properties: + read_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + read_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + + ContainerTopResponse: + type: "object" + x-go-name: "TopResponse" + title: "ContainerTopResponse" + description: |- + Container "top" response. + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + example: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + description: |- + Each process running in the container, where each process + is an array of values corresponding to the titles. 
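+  # Illustrative usage note (not part of the upstream schema): this object is
+  # returned by GET /containers/{id}/top; the optional `ps_args` query
+  # parameter (default "-ef") controls which columns appear in Titles.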
+ type: "array" + items: + type: "array" + items: + type: "string" + example: + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "27.0.1" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "27.0.1" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.47" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.24" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.22.7" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + Architecture of the daemon, as returned by the Go runtime (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "6.8.0-31-generic" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. 
+ + <p><br /></p> + + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + + <p><br /></p> + + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. 
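+  # Illustrative usage note (not part of the upstream schema): SystemInfo is
+  # the response body of GET /info, which is what the `docker info` command
+  # renders for the user.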
+ type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. + type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information obtained from `uname`. On Windows this + information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd> + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "6.8.0-31-generic" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 24.04 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Ubuntu 24.04 LTS" + OSVersion: + description: | + Version of the host's operating system + + <p><br /></p> + + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "24.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the operating system. + This is equivalent to the output of `uname -m` on Linux. + + Unlike `Arch` (from `/version`), this reports the machine's native + architecture, which can differ from the Go runtime architecture when + running a binary compiled for a different architecture (for example, + a 32-bit binary running on 64-bit hardware). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. 
+ Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + + <p><br /></p> + + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + type: "string" + example: "27.0.1" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shutdown + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. 
+ + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + - "" + InitBinary: + description: | + Name and, optional, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, user-namespaces (userns), rootless and + no-new-privileges. + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + - "name=rootless" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included. + type: "string" + example: "Community Engine" + DefaultAddressPools: + description: | + List of custom default address pools for local networks, which can be + specified in the daemon.json file or dockerd option. + + Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 + 10.10.[0-255].0/24 address pools. + type: "array" + items: + type: "object" + properties: + Base: + description: "The network address in CIDR format" + type: "string" + example: "10.10.0.0/16" + Size: + description: "The network pool size" + type: "integer" + example: "24" + FirewallBackend: + $ref: "#/definitions/FirewallInfo" + DiscoveredDevices: + description: | + List of devices discovered by device drivers. + + Each device includes information about its source driver, kind, name, + and additional driver-specific attributes. + type: "array" + items: + $ref: "#/definitions/DeviceInfo" + NRI: + $ref: "#/definitions/NRIInfo" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + CDISpecDirs: + description: | + List of directories where (Container Device Interface) CDI + specifications are located. + + These specifications define vendor-specific modifications to an OCI + runtime specification for a container being created. + + An empty list indicates that CDI device injection is disabled. + + Note that since using CDI device injection requires the daemon to have + experimental enabled. For non-experimental daemons an empty list will + always be returned. + type: "array" + items: + type: "string" + example: + - "/etc/cdi" + - "/var/run/cdi" + Containerd: + $ref: "#/definitions/ContainerdInfo" + + ContainerdInfo: + description: | + Information for connecting to the containerd instance that is used by the daemon. + This is included for debugging purposes only. 
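+  # Illustrative note (not part of the upstream schema): DefaultAddressPools
+  # above mirrors the daemon configuration, e.g. in daemon.json:
+  #
+  #   {"default-address-pools": [{"base": "10.10.0.0/16", "size": 24}]}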
+ type: "object" + x-nullable: true + properties: + Address: + description: "The address of the containerd socket." + type: "string" + example: "/run/containerd/containerd.sock" + Namespaces: + description: | + The namespaces that the daemon uses for running containers and + plugins in containerd. These namespaces can be configured in the + daemon configuration, and are considered to be used exclusively + by the daemon, Tampering with the containerd instance may cause + unexpected behavior. + + As these namespaces are considered to be exclusively accessed + by the daemon, it is not recommended to change these values, + or to change them to a value that is used by other systems, + such as cri-containerd. + type: "object" + properties: + Containers: + description: | + The default containerd namespace used for containers managed + by the daemon. + + The default namespace for containers is "moby", but will be + suffixed with the `<uid>.<gid>` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "moby" + example: "moby" + Plugins: + description: | + The default containerd namespace used for plugins managed by + the daemon. + + The default namespace for plugins is "plugins.moby", but will be + suffixed with the `<uid>.<gid>` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "plugins.moby" + example: "plugins.moby" + + FirewallInfo: + description: | + Information about the daemon's firewalling configuration. + + This field is currently only used on Linux, and omitted on other platforms. + type: "object" + x-nullable: true + properties: + Driver: + description: | + The name of the firewall backend driver. + type: "string" + example: "nftables" + Info: + description: | + Information about the firewall backend, provided as + "label" / "value" pairs. + + <p><br /></p> + + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["ReloadedAt", "2025-01-01T00:00:00Z"] + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + + <p><br /></p> + + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. 
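+  # Illustrative note (not part of the upstream schema): this struct reflects
+  # daemon registry settings such as the following daemon.json keys:
+  #
+  #   {"insecure-registries": ["registry.internal.corp.example.com:3000"],
+  #    "registry-mirrors": ["https://hub-mirror.corp.example.com:5000/"]}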
+    type: "object"
+    x-nullable: true
+    properties:
+      InsecureRegistryCIDRs:
+        description: |
+          List of IP ranges of insecure registries, using the CIDR syntax
+          ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries
+          accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
+          from unknown CAs) communication.
+
+          By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as
+          insecure. All other registries are secure. Communicating with an
+          insecure registry is not possible if the daemon assumes that registry
+          is secure.
+
+          This configuration overrides that behavior, allowing insecure
+          communication with registries whose resolved IP address is within the
+          subnet described by the CIDR syntax.
+
+          Registries can also be marked insecure by hostname. Those registries
+          are listed under `IndexConfigs` and have their `Secure` field set to
+          `false`.
+
+          > **Warning**: Using this option can be useful when running a local
+          > registry, but introduces security vulnerabilities. This option
+          > should therefore ONLY be used for testing purposes. For increased
+          > security, users should add their CA to their system's list of trusted
+          > CAs instead of enabling this option.
+        type: "array"
+        items:
+          type: "string"
+        example: ["::1/128", "127.0.0.0/8"]
+      IndexConfigs:
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/IndexInfo"
+        example:
+          "127.0.0.1:5000":
+            "Name": "127.0.0.1:5000"
+            "Mirrors": []
+            "Secure": false
+            "Official": false
+          "[2001:db8:a0b:12f0::1]:80":
+            "Name": "[2001:db8:a0b:12f0::1]:80"
+            "Mirrors": []
+            "Secure": false
+            "Official": false
+          "docker.io":
+            Name: "docker.io"
+            Mirrors: ["https://hub-mirror.corp.example.com:5000/"]
+            Secure: true
+            Official: true
+          "registry.internal.corp.example.com:3000":
+            Name: "registry.internal.corp.example.com:3000"
+            Mirrors: []
+            Secure: false
+            Official: false
+      Mirrors:
+        description: |
+          List of registry URLs that act as a mirror for the official
+          (`docker.io`) registry.
+
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "https://hub-mirror.corp.example.com:5000/"
+          - "https://[2001:db8:a0b:12f0::1]/"
+
+  IndexInfo:
+    description:
+      IndexInfo contains information about a registry.
+    type: "object"
+    x-nullable: true
+    properties:
+      Name:
+        description: |
+          Name of the registry, such as "docker.io".
+        type: "string"
+        example: "docker.io"
+      Mirrors:
+        description: |
+          List of mirrors, expressed as URIs.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "https://hub-mirror.corp.example.com:5000/"
+          - "https://registry-2.docker.io/"
+          - "https://registry-3.docker.io/"
+      Secure:
+        description: |
+          Indicates whether the registry is considered secure, that is, not
+          part of the list of insecure registries.
+
+          If `false`, the registry is insecure. Insecure registries accept
+          un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from
+          unknown CAs) communication.
+
+          > **Warning**: Insecure registries can be useful when running a local
+          > registry. However, because its use creates security vulnerabilities
+          > it should ONLY be enabled for testing purposes. For increased
+          > security, users should add their CA to their system's list of
+          > trusted CAs instead of enabling this option.
+        type: "boolean"
+        example: true
+      Official:
+        description: |
+          Indicates whether this is an official registry (i.e., Docker Hub / docker.io)
+        type: "boolean"
+        example: true
+
+  Runtime:
+    description: |
+      Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec)
+      runtime.
+
+      The runtime is invoked by the daemon via the `containerd` daemon. OCI
+      runtimes act as an interface to the Linux kernel namespaces, cgroups,
+      and SELinux.
+    type: "object"
+    properties:
+      path:
+        description: |
+          Name and, optionally, path of the OCI executable binary.
+
+          If the path is omitted, the daemon searches the host's `$PATH` for the
+          binary and uses the first result.
+        type: "string"
+        example: "/usr/local/bin/my-oci-runtime"
+      runtimeArgs:
+        description: |
+          List of command-line arguments to pass to the runtime when invoked.
+        type: "array"
+        x-nullable: true
+        items:
+          type: "string"
+        example: ["--debug", "--systemd-cgroup=false"]
+      status:
+        description: |
+          Information specific to the runtime.
+
+          While this API specification does not define data provided by runtimes,
+          the following well-known properties may be provided by runtimes:
+
+          `org.opencontainers.runtime-spec.features`: features structure as defined
+          in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md),
+          in a JSON string representation.
+
+          <p><br /></p>
+
+          > **Note**: The information returned in this field, including the
+          > formatting of values and labels, should not be considered stable,
+          > and may change without notice.
+        type: "object"
+        x-nullable: true
+        additionalProperties:
+          type: "string"
+        example:
+          "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}"
+
+  Commit:
+    description: |
+      Commit holds the Git-commit (SHA1) that a binary was built from, as
+      reported in the version-string of external tools, such as `containerd`,
+      or `runC`.
+    type: "object"
+    properties:
+      ID:
+        description: "Actual commit ID of external tool."
+        type: "string"
+        example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
+
+  SwarmInfo:
+    description: |
+      Represents generic information about the swarm.
+    type: "object"
+    properties:
+      NodeID:
+        description: "Unique identifier for this node in the swarm."
+        type: "string"
+        default: ""
+        example: "k67qz4598weg5unwwffg6z1m1"
+      NodeAddr:
+        description: |
+          IP address at which this node can be reached by other nodes in the
+          swarm.
+        type: "string"
+        default: ""
+        example: "10.0.0.46"
+      LocalNodeState:
+        $ref: "#/definitions/LocalNodeState"
+      ControlAvailable:
+        type: "boolean"
+        default: false
+        example: true
+      Error:
+        type: "string"
+        default: ""
+      RemoteManagers:
+        description: |
+          List of IDs and addresses of other managers in the swarm.
+        type: "array"
+        default: null
+        x-nullable: true
+        items:
+          $ref: "#/definitions/PeerNode"
+        example:
+          - NodeID: "71izy0goik036k48jg985xnds"
+            Addr: "10.0.0.158:2377"
+          - NodeID: "79y6h1o4gv8n120drcprv5nmc"
+            Addr: "10.0.0.159:2377"
+          - NodeID: "k67qz4598weg5unwwffg6z1m1"
+            Addr: "10.0.0.46:2377"
+      Nodes:
+        description: "Total number of nodes in the swarm."
+        type: "integer"
+        x-nullable: true
+        example: 4
+      Managers:
+        description: "Total number of managers in the swarm."
+        type: "integer"
+        x-nullable: true
+        example: 3
+      Cluster:
+        $ref: "#/definitions/ClusterInfo"
+
+  LocalNodeState:
+    description: "Current local status of this node."
+    type: "string"
+    default: ""
+    enum:
+      - ""
+      - "inactive"
+      - "pending"
+      - "active"
+      - "error"
+      - "locked"
+    example: "active"
+
+  PeerNode:
+    description: "Represents a peer-node in the swarm"
+    type: "object"
+    properties:
+      NodeID:
+        description: "Unique identifier for this node in the swarm."
+ type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. + type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.oci.image.manifest.v1+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 424 + urls: + description: |- + List of URLs from which this object MAY be downloaded. + type: "array" + items: + type: "string" + format: "uri" + x-nullable: true + annotations: + description: |- + Arbitrary metadata relating to the targeted content. 
+ type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "com.docker.official-images.bashbrew.arch": "amd64" + "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" + "org.opencontainers.image.base.name": "scratch" + "org.opencontainers.image.created": "2025-01-27T00:00:00Z" + "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" + "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" + "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" + "org.opencontainers.image.version": "24.04" + data: + type: string + x-nullable: true + description: |- + Data is an embedding of the targeted content. This is encoded as a base64 + string when marshalled to JSON (automatically, by encoding/json). If + present, Data can be used directly to avoid fetching the targeted content. + example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. + type: "string" + x-nullable: true + example: null + + OCIPlatform: + type: "object" + x-go-name: Platform + x-nullable: true + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. 
A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. 
+ default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + + ImageManifestSummary: + x-go-name: "ManifestSummary" + description: | + ImageManifestSummary represents a summary of an image manifest. + type: "object" + required: ["ID", "Descriptor", "Available", "Size", "Kind"] + properties: + ID: + description: | + ID is the content-addressable ID of an image and is the same as the + digest of the image manifest. 
+ type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Available: + description: Indicates whether all the child content (image config, layers) is fully available locally. + type: "boolean" + example: true + Size: + type: "object" + x-nullable: false + required: ["Content", "Total"] + properties: + Total: + type: "integer" + format: "int64" + example: 8213251 + description: | + Total is the total size (in bytes) of all the locally present + data (both distributable and non-distributable) that's related to + this manifest and its children. + This equal to the sum of [Content] size AND all the sizes in the + [Size] struct present in the Kind-specific data struct. + For example, for an image kind (Kind == "image") + this would include the size of the image content and unpacked + image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Content: + description: | + Content is the size (in bytes) of all the locally present + content in the content store (e.g. image config, layers) + referenced by this manifest and its children. + This only includes blobs in the content store. + type: "integer" + format: "int64" + example: 3987495 + Kind: + type: "string" + example: "image" + enum: + - "image" + - "attestation" + - "unknown" + description: | + The kind of the manifest. + + kind | description + -------------|----------------------------------------------------------- + image | Image manifest that can be used to start a container. + attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. + ImageData: + description: | + The image data for the image manifest. + This field is only populated when Kind is "image". + type: "object" + x-nullable: true + x-omitempty: true + required: ["Platform", "Containers", "Size", "UnpackedSize"] + properties: + Platform: + $ref: "#/definitions/OCIPlatform" + description: | + OCI platform of the image. This will be the platform specified in the + manifest descriptor from the index/manifest list. + If it's not available, it will be obtained from the image config. + Identity: + description: | + Identity holds information about the identity and origin of this image. + For image list responses, this can duplicate Build/Pull fields across + image manifests, because those parts are image-level metadata. + x-nullable: true + $ref: "#/definitions/Identity" + Containers: + description: | + The IDs of the containers that are using this image. + type: "array" + items: + type: "string" + example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] + Size: + type: "object" + x-nullable: false + required: ["Unpacked"] + properties: + Unpacked: + type: "integer" + format: "int64" + example: 3987495 + description: | + Unpacked is the size (in bytes) of the locally unpacked + (uncompressed) image content that's directly usable by the containers + running this image. + It's independent of the distributable content - e.g. + the image might still have an unpacked data that's still used by + some container even when the distributable/compressed content is + already gone. + AttestationData: + description: | + The image data for the attestation manifest. + This field is only populated when Kind is "attestation". 
+ type: "object" + x-nullable: true + x-omitempty: true + required: ["For"] + properties: + For: + description: | + The digest of the image manifest that this attestation is for. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated . + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers. + + Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. 
However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example; + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + required: true + responses: + 201: + description: "Container created successfully" + schema: + $ref: "#/definitions/ContainerCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" 
+ description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerInspectResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerTopResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. + schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + $ref: "#/definitions/FilesystemChange" + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
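+
+        As a purely illustrative convenience (not part of the API), the
+        formulas listed below can be combined into a small client-side helper.
+        In this minimal Go sketch, `statsSample` and `usagePercentages` are
+        hypothetical names, and the type models only the handful of response
+        fields used here:
+
+        ```go
+        // statsSample models just the fields of the stats response that are
+        // needed for the calculations described in this section.
+        type statsSample struct {
+            MemoryStats struct {
+                Usage uint64            `json:"usage"`
+                Limit uint64            `json:"limit"`
+                Stats map[string]uint64 `json:"stats"`
+            } `json:"memory_stats"`
+            CPUStats struct {
+                CPUUsage struct {
+                    TotalUsage  uint64   `json:"total_usage"`
+                    PercpuUsage []uint64 `json:"percpu_usage"`
+                } `json:"cpu_usage"`
+                SystemCPUUsage uint64 `json:"system_cpu_usage"`
+                OnlineCPUs     uint32 `json:"online_cpus"`
+            } `json:"cpu_stats"`
+            PreCPUStats struct {
+                CPUUsage struct {
+                    TotalUsage uint64 `json:"total_usage"`
+                } `json:"cpu_usage"`
+                SystemCPUUsage uint64 `json:"system_cpu_usage"`
+            } `json:"precpu_stats"`
+        }
+
+        // usagePercentages applies the formulas below to one decoded sample.
+        func usagePercentages(s statsSample) (memPercent, cpuPercent float64) {
+            // cgroup v1 exposes "cache"; cgroup v2 exposes "inactive_file".
+            cache := s.MemoryStats.Stats["cache"]
+            if v, ok := s.MemoryStats.Stats["inactive_file"]; ok {
+                cache = v
+            }
+            usedMemory := float64(s.MemoryStats.Usage - cache)
+            if limit := float64(s.MemoryStats.Limit); limit > 0 {
+                memPercent = usedMemory / limit * 100.0
+            }
+
+            cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
+            systemDelta := float64(s.CPUStats.SystemCPUUsage - s.PreCPUStats.SystemCPUUsage)
+            numberCPUs := float64(s.CPUStats.OnlineCPUs)
+            if numberCPUs == 0 {
+                numberCPUs = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
+            }
+            if systemDelta > 0 && numberCPUs > 0 {
+                cpuPercent = cpuDelta / systemDelta * numberCPUs * 100.0
+            }
+            return memPercent, cpuPercent
+        }
+        ```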
+ + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) + * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerStatsResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-<value>` where `<value>` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
+
+          type: "string"
+      tags: ["Container"]
+  /containers/{id}/stop:
+    post:
+      summary: "Stop a container"
+      operationId: "ContainerStop"
+      responses:
+        204:
+          description: "no error"
+        304:
+          description: "container already stopped"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "signal"
+          in: "query"
+          description: |
+            Signal to send to the container as an integer or string (e.g. `SIGINT`).
+          type: "string"
+        - name: "t"
+          in: "query"
+          description: "Number of seconds to wait before killing the container"
+          type: "integer"
+      tags: ["Container"]
+  /containers/{id}/restart:
+    post:
+      summary: "Restart a container"
+      operationId: "ContainerRestart"
+      responses:
+        204:
+          description: "no error"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "signal"
+          in: "query"
+          description: |
+            Signal to send to the container as an integer or string (e.g. `SIGINT`).
+          type: "string"
+        - name: "t"
+          in: "query"
+          description: "Number of seconds to wait before killing the container"
+          type: "integer"
+      tags: ["Container"]
+  /containers/{id}/kill:
+    post:
+      summary: "Kill a container"
+      description: |
+        Send a POSIX signal to a container, defaulting to killing the
+        container.
+      operationId: "ContainerKill"
+      responses:
+        204:
+          description: "no error"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        409:
+          description: "container is not running"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "signal"
+          in: "query"
+          description: |
+            Signal to send to the container as an integer or string (e.g. `SIGINT`).
+          type: "string"
+          default: "SIGKILL"
+      tags: ["Container"]
+  /containers/{id}/update:
+    post:
+      summary: "Update a container"
+      description: |
+        Change various configuration options of a container without having to
+        recreate it.
+      operationId: "ContainerUpdate"
+      consumes: ["application/json"]
+      produces: ["application/json"]
+      responses:
+        200:
+          description: "The container has been updated."
+ schema: + $ref: "#/definitions/ContainerUpdateResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. 
+ + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connected is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian. + + Following the header is the payload, which is the specified number of + bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream is not multiplexed. The data exchanged over the hijacked + connection is simply the raw data from the process PTY and client's + `stdin`. 
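+
+        As an illustration only (not part of the API contract), the 5-step
+        demultiplexing loop described in the *Stream format* section above can
+        be sketched in Go as follows; the helper name `demux` is hypothetical:
+
+        ```go
+        import (
+            "encoding/binary"
+            "io"
+        )
+
+        // demux reads the multiplexed stream from r and copies each frame's
+        // payload to stdout or stderr, based on the first header byte.
+        func demux(r io.Reader, stdout, stderr io.Writer) error {
+            var header [8]byte
+            for {
+                // 1. Read the 8-byte frame header.
+                if _, err := io.ReadFull(r, header[:]); err != nil {
+                    if err == io.EOF {
+                        return nil
+                    }
+                    return err
+                }
+                // 2. Choose the destination from STREAM_TYPE (byte 0).
+                dst := stdout
+                if header[0] == 2 {
+                    dst = stderr
+                }
+                // 3. Extract the big-endian frame size from bytes 4-7.
+                size := int64(binary.BigEndian.Uint32(header[4:8]))
+                // 4. Copy exactly `size` payload bytes to the chosen output.
+                if _, err := io.CopyN(dst, r, size); err != nil {
+                    return err
+                }
+                // 5. Loop back to the next frame.
+            }
+        }
+        ```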
+ + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards. + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." 
+      operationId: "ContainerWait"
+      produces: ["application/json"]
+      responses:
+        200:
+          description: "The container has exited."
+          schema:
+            $ref: "#/definitions/ContainerWaitResponse"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "condition"
+          in: "query"
+          description: |
+            Wait until a container state reaches the given condition.
+
+            Defaults to `not-running` if omitted or empty.
+          type: "string"
+          enum:
+            - "not-running"
+            - "next-exit"
+            - "removed"
+          default: "not-running"
+      tags: ["Container"]
+  /containers/{id}:
+    delete:
+      summary: "Remove a container"
+      operationId: "ContainerDelete"
+      responses:
+        204:
+          description: "no error"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        409:
+          description: "conflict"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: |
+                You cannot remove a running container: c2ada9df5af8. Stop the
+                container before attempting removal or force remove
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "v"
+          in: "query"
+          description: "Remove anonymous volumes associated with the container."
+          type: "boolean"
+          default: false
+        - name: "force"
+          in: "query"
+          description: "If the container is running, kill it before removing it."
+          type: "boolean"
+          default: false
+        - name: "link"
+          in: "query"
+          description: "Remove the specified link associated with the container."
+          type: "boolean"
+          default: false
+      tags: ["Container"]
+  /containers/{id}/archive:
+    head:
+      summary: "Get information about files in a container"
+      description: |
+        A response header `X-Docker-Container-Path-Stat` is returned, containing
+        a base64-encoded JSON object with some filesystem header information
+        about the path.
+      operationId: "ContainerArchiveInfo"
+      responses:
+        200:
+          description: "no error"
+          headers:
+            X-Docker-Container-Path-Stat:
+              type: "string"
+              description: |
+                A base64-encoded JSON object with some filesystem header
+                information about the path
+        400:
+          description: "Bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "Container or path does not exist"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "path"
+          in: "query"
+          required: true
+          description: "Resource in the container’s filesystem to archive."
+          type: "string"
+      tags: ["Container"]
+    get:
+      summary: "Get an archive of a filesystem resource in a container"
+      description: "Get a tar archive of a resource in the filesystem of container id."
+ operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. + type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: + + - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`<image-name>[:<tag>]`) + - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `until=<timestamp>` + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." + type: "boolean" + default: false + - name: "identity" + in: "query" + description: "Include `Identity` in each manifest summary. Requires `manifests=1`." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." 
+ type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." 
+ type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: | + BuildKit output configuration in the format of a stringified JSON array of objects. + Each object must have two top-level properties: `Type` and `Attrs`. + The `Type` property must be set to 'moby'. + The `Attrs` property is a map of attributes for the BuildKit output configuration. + See https://docs.docker.com/build/exporters/oci-docker/ for more information. + + Example: + + ``` + [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] + ``` + type: "string" + default: "" + - name: "version" + in: "query" + type: "string" + default: "1" + enum: ["1", "2"] + description: | + Version of the builder backend to use. + + - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) + - `2` is [BuildKit](https://github.com/moby/buildkit) + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "reserved-space" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "max-used-space" + in: "query" + description: "Maximum amount of disk space allowed to keep for cache" + type: "integer" + format: "int64" + - name: "min-free-space" + in: "query" + description: "Target amount of free disk space after pruning" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=<timestamp>` remove cache older than `<timestamp>`. 
The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. + - `id=<id>` + - `parent=<id>` + - `type=<string>` + - `description=<string>` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Pull or import an image." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: | + Name of the image to pull. If the name includes a tag or digest, specific behavior applies: + + - If only `fromImage` includes a tag, that tag is used. + - If both `fromImage` and `tag` are provided, `tag` takes precedence. + - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. + - If neither a tag nor digest is specified, all tags are pulled. + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. 
+ If the given image does not exist in the local image cache, the daemon
 attempts to pull the image with the host's native OS and Architecture.
 If the given image does exist in the local image cache, but its OS or
 architecture does not match, a warning is produced.

 When used with the `fromSrc` option to import an image from an archive,
 this option sets the platform information for the imported image. If
 the option is not set, the host's native OS and Architecture are used
 for the imported image.
 type: "string"
 default: ""
 tags: ["Image"]
 /images/{name}/json:
 get:
 summary: "Inspect an image"
 description: "Return low-level information about an image."
 operationId: "ImageInspect"
 produces:
 - "application/json"
 responses:
 200:
 description: "No error"
 schema:
 $ref: "#/definitions/ImageInspect"
 404:
 description: "No such image"
 schema:
 $ref: "#/definitions/ErrorResponse"
 examples:
 application/json:
 message: "No such image: someimage (tag: latest)"
 500:
 description: "Server error"
 schema:
 $ref: "#/definitions/ErrorResponse"
 parameters:
 - name: "name"
 in: "path"
 description: "Image name or id"
 type: "string"
 required: true
 - name: "manifests"
 in: "query"
 description: |-
 Include Manifests in the image summary.

 The `manifests` and `platform` options are mutually exclusive, and
 an error is produced if both are set.
 type: "boolean"
 default: false
 required: false
 - name: "platform"
 type: "string"
 in: "query"
 description: |-
 JSON-encoded OCI platform to select the platform-variant.
 If omitted, it defaults to any locally available platform,
 prioritizing the daemon's host platform.

 If the daemon provides a multi-platform image store, this selects
 the platform-variant to inspect. If the image is
 a single-platform image, or if the multi-platform image does not
 provide a variant matching the given platform, an error is returned.

 The `platform` and `manifests` options are mutually exclusive, and
 an error is produced if both are set.

 Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
 tags: ["Image"]
 /images/{name}/history:
 get:
 summary: "Get the history of an image"
 description: "Return parent layers of an image."
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + $ref: "#/definitions/ImageHistoryResponseItem" + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show the history for. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + Name of the image to push. For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. + type: "string" + required: true + - name: "tag" + in: "query" + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. 
+ + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: | + Create a tag that refers to a source image. + + This creates an additional reference (tag) to the source image. The tag + can include a different repository name and/or tag. If the repository + or tag already exists, it will be overwritten. + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + - name: "platforms" + in: "query" + description: | + Select platform-specific content to delete. + Multiple values are accepted. + Each platform is a OCI platform encoded as a JSON string. + type: "array" + items: + # This should be OCIPlatform + # but $ref is not supported for array in query in Swagger 2.0 + # $ref: "#/definitions/OCIPlatform" + type: "string" + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." 
+ operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + description: | + Whether this repository has automated builds enabled. + + <p><br /></p> + + > **Deprecated**: This field is deprecated and will always be "false". + type: "boolean" + example: false + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true + is_automated: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true + is_automated: false + name: "Busybox base image." + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true + is_automated: false + name: "postgres" + star_count: 12408 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-official=(true|false)` + - `stars=<number>` Matches images that has at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." 
+ schema: + $ref: "#/definitions/AuthResponse" + 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith <hannibal@a-team.com>`)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/jsonl" + - "application/x-ndjson" + - "application/json-seq" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=<string>` config name or ID + - `container=<string>` container name or ID + - `daemon=<string>` daemon name or ID + - `event=<string>` event type + - `image=<string>` image name or ID + - `label=<string>` image or container label + - `network=<string>` network name or ID + - `node=<string>` node ID + - `plugin`=<string> plugin name or ID + - `scope`=<string> local or swarm + - `secret=<string>` secret name or ID + - `service=<string>` service name or ID + - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=<string>` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + ImageUsage: + $ref: "#/definitions/ImagesDiskUsage" + ContainerUsage: + $ref: "#/definitions/ContainersDiskUsage" + VolumeUsage: + $ref: "#/definitions/VolumesDiskUsage" + BuildCacheUsage: + $ref: "#/definitions/BuildCacheDiskUsage" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + - name: "verbose" + in: "query" + description: | + Show detailed information on space usage. + type: "boolean" + default: false + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. 
+ + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). + + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "array" + items: + type: "string" + collectionFormat: "multi" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + - name: "platform" + type: "array" + items: + type: "string" + collectionFormat: "multi" + in: "query" + description: | + JSON encoded OCI platform(s) which will be used to select the + platform-specific image(s) to be saved if the image is + multi-platform. If not provided, the full multi-platform image + will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see the [export image endpoint](#operation/ImageGet). 
+ operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + - name: "platform" + type: "array" + items: + type: "string" + collectionFormat: "multi" + in: "query" + description: | + JSON encoded OCI platform(s) which will be used to select the + platform-specific image(s) to load if the image is + multi-platform. If not provided, the full multi-platform image + will be loaded. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + title: "ExecConfig" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-<value>` where `<value>` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container. 
+ example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + example: false + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: true + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + $ref: "#/definitions/VolumeListResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=<volume-driver-name>` Matches volumes based on their driver. + - `label=<key>` or `label=<key>:<value>` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=<volume-name>` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + $ref: "#/definitions/VolumeCreateRequest" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + put: + summary: | + "Update a volume. 
Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). 
+ + Note that it uses a different, smaller representation of a network than + inspecting a single network. For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/NetworkSummary" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv4: true + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=<driver-name>` Matches a network's driver. + - `id=<network-id>` Matches all or part of a network ID. + - `label=<key>` or `label=<key>=<value>` of a network label. + - `name=<network-name>` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/NetworkInspect" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "Network created successfully" + schema: + $ref: "#/definitions/NetworkCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + example: "my_network" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + example: "bridge" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level). + type: "string" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + example: true + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + example: false + ConfigOnly: + description: | + Creates a config-only network. Config-only networks are placeholder + networks for network configurations to be used by other networks. + Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + example: false + ConfigFrom: + description: | + Specifies the source which will provide the configuration for + this network. 
The specified network must be an existing + config-only network; see ConfigOnly. + $ref: "#/definitions/ConfigReference" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv4: + description: "Enable IPv4 on the network." + type: "boolean" + example: true + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + example: true + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Operation forbidden" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + $ref: "#/definitions/NetworkConnectRequest" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + $ref: "#/definitions/NetworkDisconnectRequest" + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. 
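A hedged sketch (editorial, not part of the upstream spec) of the prune call above using the `until` filter. `jq` percent-encodes the filter map; the socket path and `v1.49` prefix are assumptions.

```bash
# Delete unused networks created more than 24 hours ago.
filters=$(jq -rn '{until: ["24h"]} | tojson | @uri')

curl -sS --unix-socket /var/run/docker.sock \
  -X POST "http://localhost/v1.49/networks/prune?filters=${filters}" \
  | jq '.NetworksDeleted'
```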
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=<capability name>` + - `enable=<true>|<false>` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
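For orientation only (editorial, not part of the upstream spec): installing a plugin from a private registry with the `X-Registry-Auth` header described above. The registry, credentials, and plugin reference are placeholders, and the `tr` calls only approximate the base64url encoding the spec asks for; treat this as an assumption-laden outline rather than a canonical client.

```bash
# Build a base64url-encoded auth configuration and pull the plugin with it.
auth=$(printf '{"username":"user","password":"secret","serveraddress":"registry.example.com"}' \
  | base64 | tr -d '\n=' | tr '+/' '-_')

curl -sS --unix-socket /var/run/docker.sock \
  -X POST "http://localhost/v1.49/plugins/pull?remote=registry.example.com/org/sample-plugin:latest" \
  -H "X-Registry-Auth: ${auth}"
```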
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=<node id>` + - `label=<engine label>` + - `membership=`(`accepted`|`pending`)` + - `name=<node name>` + - `node.label=<node label>` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes. 
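Editorial sketch of the read-modify-write pattern implied by the `version` parameter described above, not part of the vendored spec. The node name is a placeholder, and the `Availability` field is assumed from the Engine API's NodeSpec, which is only referenced (not reproduced) here.

```bash
# Fetch the node, reuse its current version index, and post an updated spec.
node=$(curl -sS --unix-socket /var/run/docker.sock "http://localhost/v1.49/nodes/worker-1")
version=$(echo "$node" | jq -r '.Version.Index')
spec=$(echo "$node" | jq '.Spec | .Availability = "drain"')

curl -sS --unix-socket /var/run/docker.sock \
  -X POST "http://localhost/v1.49/nodes/worker-1/update?version=${version}" \
  -H "Content-Type: application/json" \
  -d "$spec"
```

Re-reading the object immediately before updating keeps the version index current, which is what the conflicting-writes note above is guarding against.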
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + `<ip|interface>`), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
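A hedged sketch (editorial, not part of the upstream spec) of initializing a swarm with the request fields described above, reusing the spec's own example addresses. Adjust `ListenAddr`/`AdvertiseAddr` for the host's real interfaces; the socket path and `v1.49` prefix are assumptions.

```bash
# Initialize a single-node swarm; the response body is the new node ID.
curl -sS --unix-socket /var/run/docker.sock \
  -X POST "http://localhost/v1.49/swarm/init" \
  -H "Content-Type: application/json" \
  -d '{
        "ListenAddr": "0.0.0.0:2377",
        "AdvertiseAddr": "192.168.1.1:2377",
        "DataPathPort": 4789,
        "DefaultAddrPool": ["10.10.0.0/16"],
        "SubnetSize": 24,
        "ForceNewCluster": false
      }'
```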
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + `<ip|interface>`), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathAddr: "192.168.1.1" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
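Editorial one-liner, not part of the vendored spec: the leave call above with `force` set, bypassing the last-manager safety check. Socket path and `v1.49` prefix are assumptions.

```bash
# Leave the swarm from this node even if it is the last manager.
curl -sS --unix-socket /var/run/docker.sock \
  -X POST "http://localhost/v1.49/swarm/leave?force=true"
```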
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=<service id>` + - `label=<service label>` + - `mode=["replicated"|"global"]` + - `name=<service name>` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
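Illustrative sketch, not part of the vendored spec: listing replicated services with the task counts that the `status` parameter above enables. The `ServiceStatus` field name is taken from the referenced `Service` definition, which is not reproduced here, so treat it as an assumption; the socket path and `v1.49` prefix likewise.

```bash
# List replicated services with their running/desired task counts.
filters=$(jq -rn '{mode: ["replicated"]} | tojson | @uri')

curl -sS --unix-socket /var/run/docker.sock \
  "http://localhost/v1.49/services?status=true&filters=${filters}" \
  | jq '.[] | {Name: .Spec.Name, Status: .ServiceStatus}'
```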
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/ServiceCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + OomScoreAdj: 0 + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
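Editorial sketch, not part of the upstream spec: the inspect call above with `insertDefaults`, so unset spec fields come back populated with their defaults. The service name is a placeholder; socket path and `v1.49` prefix are assumptions.

```bash
# Inspect a service and show its spec with defaults filled in.
curl -sS --unix-socket /var/run/docker.sock \
  "http://localhost/v1.49/services/web?insertDefaults=true" \
  | jq '.Spec'
```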
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + OomScoreAdj: 0 + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
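A hedged sketch (editorial, not part of the vendored spec) of streaming logs from the endpoint above, using the query parameters it lists. The service name is a placeholder; socket path and `v1.49` prefix are assumptions. The response is a raw or multiplexed stream rather than JSON, so it is left unparsed.

```bash
# Follow the last 50 timestamped log lines for a service.
curl -sS --no-buffer --unix-socket /var/run/docker.sock \
  "http://localhost/v1.49/services/web/logs?stdout=true&stderr=true&follow=true&tail=50&timestamps=true"
```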
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: 
"60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=<task id>` + - `label=key` or `label="key=value"` + - `name=<task name>` + - `node=<node id or name>` + - `service=<service name>` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=<secret id>` + - `label=<key> or label=<key>=value` + - `name=<secret name>` + - `names=<secret name>` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. 
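Editorial sketch, not part of the upstream spec: the labels-only update flow described above, end to end. The secret ID reuses the spec's example value; socket path and `v1.49` prefix are assumptions.

```bash
# Inspect the secret, change only Labels, and resubmit the spec with the
# current version index to avoid a conflicting write.
id="ktnbjxoalbkvbvedmg1urrz8h"
secret=$(curl -sS --unix-socket /var/run/docker.sock "http://localhost/v1.49/secrets/${id}")
version=$(echo "$secret" | jq -r '.Version.Index')
spec=$(echo "$secret" | jq '.Spec | .Labels.environment = "staging"')

curl -sS --unix-socket /var/run/docker.sock \
  -X POST "http://localhost/v1.49/secrets/${id}/update?version=${version}" \
  -H "Content-Type: application/json" \
  -d "$spec"
```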
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=<config id>` + - `label=<key> or label=<key>=value` + - `name=<config name>` + - `names=<config name>` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 
404: + description: "no such config" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: | + The spec of the config to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [ConfigInspect endpoint](#operation/ConfigInspect) response values. + - name: "version" + in: "query" + description: | + The version number of the config object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: | + Return image digest and platform information by contacting the registry. + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + $ref: "#/definitions/DistributionInspect" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to + call back to the client for advanced capabilities. + + > **Deprecated**: This endpoint is deprecated and will be removed in a future version. + > Server should support gRPC directly on the listening socket. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows + the client to expose gPRC services on that connection. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon responds with a `101 UPGRADED` response follow with + the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session"] diff --git a/_vendor/github.com/moby/moby/docs/api/version-history.md b/_vendor/github.com/moby/moby/docs/api/version-history.md deleted file mode 100644 index 3184e42d737..00000000000 --- a/_vendor/github.com/moby/moby/docs/api/version-history.md +++ /dev/null @@ -1,991 +0,0 @@ ---- -title: "Engine API version history" -description: "Documentation of changes that have been made to Engine API." -keywords: "API, Docker, rcli, REST, documentation" ---- - -<!-- This file is maintained within the moby/moby GitHub - repository at https://github.com/moby/moby/. Make all - pull requests against that repo. 
If you see this file in - another repository, consider it read-only there, as it will - periodically be overwritten by the definitive file. Pull - requests which include edits to this file in other repositories - will be rejected. ---> - -## v1.49 API changes - -[Docker Engine API v1.49](https://docs.docker.com/reference/api/engine/version/v1.49/) documentation - -* `GET /images/{name}/json` now supports a `platform` parameter (JSON - encoded OCI Platform type) allowing to specify a platform of the multi-platform - image to inspect. - This option is mutually exclusive with the `manifests` option. -* `GET /info` now returns a `FirewallBackend` containing information about - the daemon's firewalling configuration. -* Deprecated: The `AllowNondistributableArtifactsCIDRs` and `AllowNondistributableArtifactsHostnames` - fields in the `RegistryConfig` struct in the `GET /info` response are omitted - in API v1.49. -* Deprecated: The `ContainerdCommit.Expected`, `RuncCommit.Expected`, and - `InitCommit.Expected` fields in the `GET /info` endpoint were deprecated - in API v1.48, and are now omitted in API v1.49. - -## v1.48 API changes - -[Docker Engine API v1.48](https://docs.docker.com/reference/api/engine/version/v1.48/) documentation - -* Deprecated: The "error" and "progress" fields in streaming responses for - endpoints that return a JSON progress response, such as `POST /images/create`, - `POST /images/{name}/push`, and `POST /build` are deprecated. These fields - were marked deprecated in API v1.4 (docker v0.6.0) and API v1.8 (docker v0.7.1) - respectively, but still returned. These fields will be left empty or will - be omitted in a future API version. Users should use the information in the - `errorDetail` and `progressDetail` fields instead. -* Deprecated: The "allow-nondistributable-artifacts" daemon configuration is - deprecated and enabled by default. The `AllowNondistributableArtifactsCIDRs` - and `AllowNondistributableArtifactsHostnames` fields in the `RegistryConfig` - struct in the `GET /info` response will now always be `null` and will be - omitted in API v1.49. -* Deprecated: The `BridgeNfIptables` and `BridgeNfIp6tables` fields in the - `GET /info` response are now always be `false` and will be omitted in API - v1.49. The netfilter module is now loaded on-demand, and no longer during - daemon startup, making these fields obsolete. -* `GET /images/{name}/history` now supports a `platform` parameter (JSON - encoded OCI Platform type) that allows to specify a platform to show the - history of. -* `POST /images/{name}/load` and `GET /images/{name}/get` now support a - `platform` parameter (JSON encoded OCI Platform type) that allows to specify - a platform to load/save. Not passing this parameter will result in - loading/saving the full multi-platform image. -* `POST /containers/create` now includes a warning in the response when setting - the container-wide `Config.VolumeDriver` option in combination with volumes - defined through `Mounts` because the `VolumeDriver` option has no effect on - those volumes. This warning was previously generated by the CLI, but now - moved to the daemon so that other clients can also get this warning. -* `POST /containers/create` now supports `Mount` of type `image` for mounting - an image inside a container. -* Deprecated: The `ContainerdCommit.Expected`, `RuncCommit.Expected`, and - `InitCommit.Expected` fields in the `GET /info` endpoint are deprecated - and will be omitted in API v1.49. 
-* `Sysctls` in `HostConfig` (top level `--sysctl` settings) for `eth0` are - no longer migrated to `DriverOpts`, as described in the changes for v1.46. -* `GET /images/json` and `GET /images/{name}/json` responses now include - `Descriptor` field, which contains an OCI descriptor of the image target. - The new field will only be populated if the daemon provides a multi-platform - image store. - WARNING: This is experimental and may change at any time without any backward - compatibility. -* `GET /images/{name}/json` response now will return the `Manifests` field - containing information about the sub-manifests contained in the image index. - This includes things like platform-specific manifests and build attestations. - The new field will only be populated if the request also sets the `manifests` - query parameter to `true`. - This acts the same as in the `GET /images/json` endpoint. - WARNING: This is experimental and may change at any time without any backward compatibility. -* `GET /containers/{name}/json` now returns an `ImageManifestDescriptor` field - containing the OCI descriptor of the platform-specific image manifest of the - image that was used to create the container. - This field is only populated if the daemon provides a multi-platform image - store. -* `POST /networks/create` now has an `EnableIPv4` field. Setting it to `false` - disables IPv4 IPAM for the network. It can only be set to `false` if the - daemon has experimental features enabled. -* `GET /networks/{id}` now returns an `EnableIPv4` field showing whether the - network has IPv4 IPAM enabled. -* `POST /networks/{id}/connect` and `POST /containers/create` now accept a - `GwPriority` field in `EndpointsConfig`. This value is used to determine which - network endpoint provides the default gateway for the container. The endpoint - with the highest priority is selected. If multiple endpoints have the same - priority, endpoints are sorted lexicographically by their network name, and - the one that sorts first is picked. -* `GET /containers/json` now returns a `GwPriority` field in `NetworkSettings` - for each network endpoint. -* API debug endpoints (`GET /debug/vars`, `GET /debug/pprof/`, `GET /debug/pprof/cmdline`, - `GET /debug/pprof/profile`, `GET /debug/pprof/symbol`, `GET /debug/pprof/trace`, - `GET /debug/pprof/{name}`) are now also accessible through the versioned-API - paths (`/v<API-version>/<endpoint>`). -* `POST /build/prune` renames `keep-bytes` to `reserved-space` and now supports - additional prune parameters `max-used-space` and `min-free-space`. -* `GET /containers/json` now returns an `ImageManifestDescriptor` field - matching the same field in `/containers/{name}/json`. - This field is only populated if the daemon provides a multi-platform image - store. - -## v1.47 API changes - -[Docker Engine API v1.47](https://docs.docker.com/reference/api/engine/version/v1.47/) documentation - -* `GET /images/json` response now includes `Manifests` field, which contains - information about the sub-manifests included in the image index. This - includes things like platform-specific manifests and build attestations. - The new field will only be populated if the request also sets the `manifests` - query parameter to `true`. - WARNING: This is experimental and may change at any time without any backward - compatibility. -* `GET /info` no longer includes warnings when `bridge-nf-call-iptables` or - `bridge-nf-call-ip6tables` are disabled when the daemon was started. 
The - `br_netfilter` module is now attempted to be loaded when needed, making those - warnings inaccurate. This change is not versioned, and affects all API versions - if the daemon has this patch. - -## v1.46 API changes - -[Docker Engine API v1.46](https://docs.docker.com/reference/api/engine/version/v1.46/) documentation - -* `GET /info` now includes a `Containerd` field containing information about - the location of the containerd API socket and containerd namespaces used - by the daemon to run containers and plugins. -* `POST /containers/create` field `NetworkingConfig.EndpointsConfig.DriverOpts`, - and `POST /networks/{id}/connect` field `EndpointsConfig.DriverOpts`, now - support label `com.docker.network.endpoint.sysctls` for setting per-interface - sysctls. The value is a comma separated list of sysctl assignments, the - interface name must be "IFNAME". For example, to set - `net.ipv4.config.eth0.log_martians=1`, use - `net.ipv4.config.IFNAME.log_martians=1`. In API versions up-to 1.46, top level - `--sysctl` settings for `eth0` will be migrated to `DriverOpts` when possible. - This automatic migration will be removed in a future release. -* `GET /containers/json` now returns the annotations of containers. -* `POST /images/{name}/push` now supports a `platform` parameter (JSON encoded - OCI Platform type) that allows selecting a specific platform manifest from - the multi-platform image. -* `POST /containers/create` now takes `Options` as part of `HostConfig.Mounts.TmpfsOptions` to set options for tmpfs mounts. -* `POST /services/create` now takes `Options` as part of `ContainerSpec.Mounts.TmpfsOptions`, to set options for tmpfs mounts. -* `GET /events` now supports image `create` event that is emitted when a new - image is built regardless if it was tagged or not. - -### Deprecated Config fields in `GET /images/{name}/json` response - -The `Config` field returned by this endpoint (used for "image inspect") returns -additional fields that are not part of the image's configuration and not part of -the [Docker Image Spec] and the [OCI Image Spec]. - -These additional fields are included in the response, due to an -implementation detail, where the [api/types.ImageInspec] type used -for the response is using the [container.Config] type. - -The [container.Config] type is a superset of the image config, and while the -image's Config is used as a _template_ for containers created from the image, -the additional fields are set at runtime (from options passed when creating -the container) and not taken from the image Config. - -These fields are never set (and always return the default value for the type), -but are not omitted in the response when left empty. As these fields were not -intended to be part of the image configuration response, they are deprecated, -and will be removed from the API. 
- -The following fields are currently included in the API response, but -are not part of the underlying image's Config, and deprecated: - -- `Hostname` -- `Domainname` -- `AttachStdin` -- `AttachStdout` -- `AttachStderr` -- `Tty` -- `OpenStdin` -- `StdinOnce` -- `Image` -- `NetworkDisabled` (already omitted unless set) -- `MacAddress` (already omitted unless set) -- `StopTimeout` (already omitted unless set) - -[Docker image spec]: https://github.com/moby/docker-image-spec/blob/v1.3.1/specs-go/v1/image.go#L19-L32 -[OCI Image Spec]: https://github.com/opencontainers/image-spec/blob/v1.1.0/specs-go/v1/config.go#L24-L62 -[api/types.ImageInspec]: https://github.com/moby/moby/blob/v26.1.4/api/types/types.go#L87-L104 -[container.Config]: https://github.com/moby/moby/blob/v26.1.4/api/types/container/config.go#L47-L82 - -* `POST /services/create` and `POST /services/{id}/update` now support OomScoreAdj - -## v1.45 API changes - -[Docker Engine API v1.45](https://docs.docker.com/reference/api/engine/version/v1.45/) documentation - -* `POST /containers/create` now supports `VolumeOptions.Subpath` which allows a - subpath of a named volume to be mounted. -* `POST /images/search` will always assume a `false` value for the `is-automated` - field. Consequently, searching for `is-automated=true` will yield no results, - while `is-automated=false` will be a no-op. -* `GET /images/{name}/json` no longer includes the `Container` and - `ContainerConfig` fields. To access image configuration, use `Config` field - instead. -* The `Aliases` field returned in calls to `GET /containers/{name:.*}/json` no - longer contains the short container ID, but instead will reflect exactly the - values originally submitted to the `POST /containers/create` endpoint. The - newly introduced `DNSNames` should now be used instead when short container - IDs are needed. - -## v1.44 API changes - -[Docker Engine API v1.44](https://docs.docker.com/reference/api/engine/version/v1.44/) documentation - -* GET `/images/json` now accepts an `until` filter. This accepts a timestamp and - lists all images created before it. The `<timestamp>` can be Unix timestamps, - date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) - computed relative to the daemon machine’s time. This change is not versioned, - and affects all API versions if the daemon has this patch. -* The `VirtualSize` field in the `GET /images/{name}/json`, `GET /images/json`, - and `GET /system/df` responses is now omitted. Use the `Size` field instead, - which contains the same information. -* Deprecated: The `is_automated` field in the `GET /images/search` response has - been deprecated and will always be set to false in the future because Docker - Hub is deprecating the `is_automated` field in its search API. The deprecation - is not versioned, and applies to all API versions. -* Deprecated: The `is-automated` filter for the `GET /images/search` endpoint. - The `is_automated` field has been deprecated by Docker Hub's search API. - Consequently, searching for `is-automated=true` will yield no results. The - deprecation is not versioned, and applies to all API versions. -* Read-only bind mounts are now made recursively read-only on kernel >= 5.12 - with runtimes which support the feature. - `POST /containers/create`, `GET /containers/{id}/json`, and `GET /containers/json` now supports - `BindOptions.ReadOnlyNonRecursive` and `BindOptions.ReadOnlyForceRecursive` to customize the behavior. 
-* `POST /containers/create` now accepts a `HealthConfig.StartInterval` to set the - interval for health checks during the start period. -* `GET /info` now includes a `CDISpecDirs` field indicating the configured CDI - specifications directories. The use of the applied setting requires the daemon - to have experimental enabled, and for non-experimental daemons an empty list is - always returned. -* `POST /networks/create` now returns a 400 if the `IPAMConfig` has invalid - values. Note that this change is _unversioned_ and applied to all API - versions on daemon that support version 1.44. -* `POST /networks/create` with a duplicated name now fails systematically. As - such, the `CheckDuplicate` field is now deprecated. Note that this change is - _unversioned_ and applied to all API versions on daemon that support version - 1.44. -* `POST /containers/create` now accepts multiple `EndpointSettings` in - `NetworkingConfig.EndpointSettings`. -* `POST /containers/create` and `POST /networks/{id}/connect` will now catch - validation errors that were previously only returned during `POST /containers/{id}/start`. - These endpoints will also return the full set of validation errors they find, - instead of returning only the first one. - Note that this change is _unversioned_ and applies to all API versions. -* `POST /services/create` and `POST /services/{id}/update` now accept `Seccomp` - and `AppArmor` fields in the `ContainerSpec.Privileges` object. This allows - some configuration of Seccomp and AppArmor in Swarm services. -* A new endpoint-specific `MacAddress` field has been added to `NetworkSettings.EndpointSettings` - on `POST /containers/create`, and to `EndpointConfig` on `POST /networks/{id}/connect`. - The container-wide `MacAddress` field in `Config`, on `POST /containers/create`, is now deprecated. -* The field `Networks` in the `POST /services/create` and `POST /services/{id}/update` - requests is now deprecated. You should instead use the field `TaskTemplate.Networks`. -* The `Container` and `ContainerConfig` fields in the `GET /images/{name}/json` - response are deprecated and will no longer be included in API v1.45. -* `GET /info` now includes `status` properties in `Runtimes`. -* A new field named `DNSNames` and containing all non-fully qualified DNS names - a container takes on a specific network has been added to `GET /containers/{name:.*}/json`. -* The `Aliases` field returned in calls to `GET /containers/{name:.*}/json` in v1.44 and older - versions contains the short container ID. This will change in the next API version, v1.45. - Starting with that API version, this specific value will be removed from the `Aliases` field - such that this field will reflect exactly the values originally submitted to the - `POST /containers/create` endpoint. The newly introduced `DNSNames` should now be used instead. -* The fields `HairpinMode`, `LinkLocalIPv6Address`, `LinkLocalIPv6PrefixLen`, `SecondaryIPAddresses`, - `SecondaryIPv6Addresses` available in `NetworkSettings` when calling `GET /containers/{id}/json` are - deprecated and will be removed in a future release. You should instead look for the default network in - `NetworkSettings.Networks`. -* `GET /images/{id}/json` omits the `Created` field (previously it was `0001-01-01T00:00:00Z`) - if the `Created` field is missing from the image config. 
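As an editor's illustration (not part of the original changelog), the per-network `DNSNames` field added in v1.44 can be read back over the local Engine API socket. The container name `web`, the socket path, and the use of `jq` are assumptions for this sketch:

```bash
# Inspect a container over the local Engine API socket (API v1.44+)
# and print the DNSNames reported for each attached network.
# "web" is a placeholder container name.
curl --silent --unix-socket /var/run/docker.sock \
  "http://localhost/v1.44/containers/web/json" \
  | jq '.NetworkSettings.Networks | map_values(.DNSNames)'
```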
-
-## v1.43 API changes
-
-[Docker Engine API v1.43](https://docs.docker.com/reference/api/engine/version/v1.43/) documentation
-
-* `POST /containers/create` now accepts `Annotations` as part of `HostConfig`.
-  Can be used to attach arbitrary metadata to the container, which will also be
-  passed to the runtime when the container is started.
-* `GET /images/json` no longer includes hardcoded `<none>:<none>` and
-  `<none>@<none>` in `RepoTags` and `RepoDigests` for untagged images.
-  In such cases, empty arrays will be produced instead.
-* The `VirtualSize` field in the `GET /images/{name}/json`, `GET /images/json`,
-  and `GET /system/df` responses is deprecated and will no longer be included
-  in API v1.44. Use the `Size` field instead, which contains the same information.
-* `GET /info` now includes `no-new-privileges` in the `SecurityOptions` string
-  list when this option is enabled globally. This change is not versioned, and
-  affects all API versions if the daemon has this patch.
-
-## v1.42 API changes
-
-[Docker Engine API v1.42](https://docs.docker.com/reference/api/engine/version/v1.42/) documentation
-
-* Removed the `BuilderSize` field on the `GET /system/df` endpoint. This field
-  was introduced in API 1.31 as part of an experimental feature, and has not been
-  used since API 1.40.
-  Use the `BuildCache` field instead to track storage used by the builder component.
-* `POST /containers/{id}/stop` and `POST /containers/{id}/restart` now accept a
-  `signal` query parameter, which allows overriding the container's default
-  stop signal.
-* `GET /images/json` now accepts the query parameter `shared-size`. When set to `true`,
-  images returned will include `SharedSize`, which provides the size on disk shared
-  with other images present on the system.
-* `GET /system/df` now accepts the query parameter `type`. When set, it
-  computes and returns data only for the specified object type.
-  The parameter can be specified multiple times to select several object types.
-  Supported values are: `container`, `image`, `volume`, `build-cache`.
-* `GET /system/df` can now be used concurrently. If a request is made while a
-  previous request is still being processed, the request will receive the result
-  of the already running calculation, once completed. Previously, an error
-  (`a disk usage operation is already running`) would be returned in this
-  situation. This change is not versioned, and affects all API versions if the
-  daemon has this patch.
-* `POST /images/create` now uses both the operating system and architecture
-  passed through the `platform` query parameter when using the `fromSrc`
-  option to import an image from an archive. Previously, only the operating system
-  was used and the architecture was ignored. If no `platform` option is set, the
-  host's operating system and architecture are used as the default. This change is not
-  versioned, and affects all API versions if the daemon has this patch.
-* The `POST /containers/{id}/wait` endpoint now returns a `400` status code if an
-  invalid `condition` is provided (on API 1.30 and up).
-* Removed the `KernelMemory` field from the `POST /containers/create` and
-  `POST /containers/{id}/update` endpoints; any value it is set to will be ignored
-  on API version `v1.42` and up. Older API versions still accept this field, but
-  it may have no effect, depending on the kernel version and OCI runtime in use.
-* `GET /containers/{id}/json` now omits the `KernelMemory` and `KernelMemoryTCP`
-  fields if they are not set.
-* `GET /info` now omits the `KernelMemory` and `KernelMemoryTCP` fields if they are not
-  supported by the host or the host's configuration (if cgroups v2 are in use).
-* `GET /_ping` and `HEAD /_ping` now return `Builder-Version` by default.
-  This header contains the default builder to use, and is a recommendation as
-  advertised by the daemon. However, it is up to the client to choose which builder
-  to use.
-
-  The default value on Linux is version "2" (BuildKit), but the daemon can be
-  configured to recommend version "1" (classic Builder). Windows does not yet
-  support BuildKit for native Windows images, and uses "1" (classic builder) as
-  a default.
-
-  This change is not versioned, and affects all API versions if the daemon has
-  this patch.
-* `GET /_ping` and `HEAD /_ping` now return a `Swarm` header, which allows a
-  client to detect if Swarm is enabled on the daemon, without having to call
-  additional endpoints.
-  This change is not versioned, and affects all API versions if the daemon has
-  this patch. Clients must consider this header "optional", and fall back to
-  using other endpoints to get this information if the header is not present.
-
-  The `Swarm` header can contain one of the following values:
-
-  - "inactive"
-  - "pending"
-  - "error"
-  - "locked"
-  - "active/worker"
-  - "active/manager"
-* `POST /containers/create` for Windows containers now accepts a new syntax in
-  `HostConfig.Resources.Devices.PathOnHost`. As well as the existing `class/<GUID>`
-  syntax, `<IDType>://<ID>` is now recognised. Support for specific `<IDType>` values
-  depends on the underlying implementation and Windows version. This change is not
-  versioned, and affects all API versions if the daemon has this patch.
-* `GET /containers/{id}/attach`, `GET /exec/{id}/start`, `GET /containers/{id}/logs`,
-  `GET /services/{id}/logs`, and `GET /tasks/{id}/logs` now set the Content-Type header
-  to `application/vnd.docker.multiplexed-stream` when a multiplexed stdout/stderr
-  stream is sent to the client, and `application/vnd.docker.raw-stream` otherwise.
-* `POST /volumes/create` now accepts a new `ClusterVolumeSpec` to create a cluster
-  volume (CSI). This option can only be used if the daemon is a Swarm manager.
-  The Volume response on creation can now also contain a `ClusterVolume` field
-  with information about the created volume.
-* The `BuildCache.Parent` field, as returned by `GET /system/df`, is deprecated
-  and is now omitted. API versions before v1.42 continue to include this field.
-* `GET /system/df` now includes a new `Parents` field for "build-cache" records,
-  which contains a list of parent IDs for the build-cache record.
-* Volume information returned by `GET /volumes/{name}`, `GET /volumes` and
-  `GET /system/df` can now contain a `ClusterVolume` if the volume is a cluster
-  volume (requires the daemon to be a Swarm manager).
-* The `Volume` type, as returned by these endpoints, gained new `ClusterVolume` fields.
-* Added a new `PUT /volumes/{name}` endpoint to update cluster volumes (CSI).
-  Cluster volumes are only supported if the daemon is a Swarm manager.
-* The `GET /containers/{name}/attach/ws` endpoint now accepts `stdin`, `stdout` and
-  `stderr` query parameters to only attach to the configured streams.
-
-  NOTE: These parameters were documented before in older API versions, but were not
-  actually supported. API versions before v1.42 continue to ignore these parameters
-  and default to attaching to all streams. To preserve the pre-v1.42 behavior,
-  set all three query parameters (`?stdin=1&stdout=1&stderr=1`).
-* `POST /containers/create` on Linux now respects the `HostConfig.ConsoleSize` property. - Container is immediately created with the desired terminal size and clients no longer - need to set the desired size on their own. -* `POST /containers/create` allow to set `CreateMountpoint` for host path to be - created if missing. This brings parity with `Binds` -* `POST /containers/create` rejects request if BindOptions|VolumeOptions|TmpfsOptions - is set with a non-matching mount Type. -* `POST /containers/{id}/exec` now accepts an optional `ConsoleSize` parameter. - It allows to set the console size of the executed process immediately when it's created. -* `POST /volumes/prune` will now only prune "anonymous" volumes (volumes which were not given a name) by default. A new filter parameter `all` can be set to a truth-y value (`true`, `1`) to get the old behavior. - -## v1.41 API changes - -[Docker Engine API v1.41](https://docs.docker.com/reference/api/engine/version/v1.41/) documentation - -* `GET /events` now returns `prune` events after pruning resources have completed. - Prune events are returned for `container`, `network`, `volume`, `image`, and - `builder`, and have a `reclaimed` attribute, indicating the amount of space - reclaimed (in bytes). -* `GET /info` now returns a `CgroupVersion` field, containing the cgroup version. -* `GET /info` now returns a `DefaultAddressPools` field, containing a list of - custom default address pools for local networks, which can be specified in the - `daemon.json` file or `--default-address-pool` dockerd option. -* `POST /services/create` and `POST /services/{id}/update` now supports `BindOptions.NonRecursive`. -* The `ClusterStore` and `ClusterAdvertise` fields in `GET /info` are deprecated - and are now omitted if they contain an empty value. This change is not versioned, - and affects all API versions if the daemon has this patch. -* The `filter` (singular) query parameter, which was deprecated in favor of the - `filters` option in Docker 1.13, has now been removed from the `GET /images/json` - endpoint. The parameter remains available when using API version 1.40 or below. -* `GET /services` now returns `CapAdd` and `CapDrop` as part of the `ContainerSpec`. -* `GET /services/{id}` now returns `CapAdd` and `CapDrop` as part of the `ContainerSpec`. -* `POST /services/create` now accepts `CapAdd` and `CapDrop` as part of the `ContainerSpec`. -* `POST /services/{id}/update` now accepts `CapAdd` and `CapDrop` as part of the `ContainerSpec`. -* `GET /tasks` now returns `CapAdd` and `CapDrop` as part of the `ContainerSpec`. -* `GET /tasks/{id}` now returns `CapAdd` and `CapDrop` as part of the `ContainerSpec`. -* `GET /services` now returns `Pids` in `TaskTemplate.Resources.Limits`. -* `GET /services/{id}` now returns `Pids` in `TaskTemplate.Resources.Limits`. -* `POST /services/create` now accepts `Pids` in `TaskTemplate.Resources.Limits`. -* `POST /services/{id}/update` now accepts `Pids` in `TaskTemplate.Resources.Limits` - to limit the maximum number of PIDs. -* `GET /tasks` now returns `Pids` in `TaskTemplate.Resources.Limits`. -* `GET /tasks/{id}` now returns `Pids` in `TaskTemplate.Resources.Limits`. -* `POST /containers/create` now accepts a `platform` query parameter in the format - `os[/arch[/variant]]`. - - When set, the daemon checks if the requested image is present in the local image - cache with the given OS and Architecture, and otherwise returns a `404` status. 
-
-  If the option is _not_ set, the host's native OS and Architecture are used to
-  look up the image in the image cache. However, if no platform is passed and the
-  given image _does_ exist in the local image cache, but its OS or architecture
-  do not match, the container is created with the available image, and a warning
-  is added to the `Warnings` field in the response, for example:
-
-       WARNING: The requested image's platform (linux/arm64/v8) does not
-                match the detected host platform (linux/amd64) and no
-                specific platform was requested
-
-* `POST /containers/create` on Linux now accepts the `HostConfig.CgroupnsMode` property.
-  Set the property to `host` to create the container in the daemon's cgroup namespace, or
-  `private` to create the container in its own private cgroup namespace. The per-daemon
-  default is `host`, and can be changed by using the `CgroupNamespaceMode` daemon
-  configuration parameter.
-* `GET /info` now returns an `OSVersion` field, containing the operating system's
-  version. This change is not versioned, and affects all API versions if the daemon
-  has this patch.
-* `GET /info` no longer returns the `SystemStatus` field if it does not have a
-  value set. This change is not versioned, and affects all API versions if the
-  daemon has this patch.
-* `GET /services` now accepts the query parameter `status`. When set to `true`,
-  services returned will include `ServiceStatus`, which provides Desired,
-  Running, and Completed task counts for the service.
-* `GET /services` may now include `ReplicatedJob` or `GlobalJob` as the `Mode`
-  in a `ServiceSpec`.
-* `GET /services/{id}` may now include `ReplicatedJob` or `GlobalJob` as the
-  `Mode` in a `ServiceSpec`.
-* `POST /services/create` now accepts `ReplicatedJob` or `GlobalJob` as the `Mode`
-  in the `ServiceSpec`.
-* `POST /services/{id}/update` accepts updating the fields of the
-  `ReplicatedJob` object in the `ServiceSpec.Mode`. The service mode still
-  cannot be changed, however.
-* `GET /services` now includes `JobStatus` on services with mode
-  `ReplicatedJob` or `GlobalJob`.
-* `GET /services/{id}` now includes `JobStatus` on services with mode
-  `ReplicatedJob` or `GlobalJob`.
-* `GET /tasks` now includes `JobIteration` on tasks spawned from a job-mode
-  service.
-* `GET /tasks/{id}` now includes `JobIteration` on the task if spawned from a
-  job-mode service.
-* `GET /containers/{id}/stats` now accepts a `one-shot` query parameter which, when used
-  with `stream=false`, fetches a single set of stats instead of waiting for two
-  collection cycles to have 2 CPU stats over a 1 second period.
-* The `KernelMemory` field in `HostConfig.Resources` is now deprecated.
-* The `KernelMemory` field in `Info` is now deprecated.
-* `GET /services` now returns `Ulimits` as part of `ContainerSpec`.
-* `GET /services/{id}` now returns `Ulimits` as part of `ContainerSpec`.
-* `POST /services/create` now accepts `Ulimits` as part of `ContainerSpec`.
-* `POST /services/{id}/update` now accepts `Ulimits` as part of `ContainerSpec`.
-
-## v1.40 API changes
-
-[Docker Engine API v1.40](https://docs.docker.com/reference/api/engine/version/v1.40/) documentation
-
-* The `/_ping` endpoint can now be accessed using either `GET` or `HEAD` requests.
-  When accessed using a `HEAD` request, all headers are returned, but the body
-  is empty (`Content-Length: 0`). This change is not versioned, and affects all
-  API versions if the daemon has this patch. Clients are recommended to try
-  using `HEAD`, but to fall back to `GET` if the `HEAD` request fails.
-* `GET /_ping` and `HEAD /_ping` now set `Cache-Control` and `Pragma` headers to - prevent the result from being cached. This change is not versioned, and affects - all API versions if the daemon has this patch. -* `GET /services` now returns `Sysctls` as part of the `ContainerSpec`. -* `GET /services/{id}` now returns `Sysctls` as part of the `ContainerSpec`. -* `POST /services/create` now accepts `Sysctls` as part of the `ContainerSpec`. -* `POST /services/{id}/update` now accepts `Sysctls` as part of the `ContainerSpec`. -* `POST /services/create` now accepts `Config` as part of `ContainerSpec.Privileges.CredentialSpec`. -* `POST /services/{id}/update` now accepts `Config` as part of `ContainerSpec.Privileges.CredentialSpec`. -* `POST /services/create` now includes `Runtime` as an option in `ContainerSpec.Configs` -* `POST /services/{id}/update` now includes `Runtime` as an option in `ContainerSpec.Configs` -* `GET /tasks` now returns `Sysctls` as part of the `ContainerSpec`. -* `GET /tasks/{id}` now returns `Sysctls` as part of the `ContainerSpec`. -* `GET /networks` now supports a `dangling` filter type. When set to `true` (or - `1`), the endpoint returns all networks that are not in use by a container. When - set to `false` (or `0`), only networks that are in use by one or more containers - are returned. -* `GET /nodes` now supports a filter type `node.label` filter to filter nodes based - on the node.label. The format of the label filter is `node.label=<key>`/`node.label=<key>=<value>` - to return those with the specified labels, or `node.label!=<key>`/`node.label!=<key>=<value>` - to return those without the specified labels. -* `POST /containers/create` now accepts a `fluentd-async` option in `HostConfig.LogConfig.Config` - when using the Fluentd logging driver. This option deprecates the `fluentd-async-connect` - option, which remains functional, but will be removed in a future release. Users - are encouraged to use the `fluentd-async` option going forward. This change is - not versioned, and affects all API versions if the daemon has this patch. -* `POST /containers/create` now accepts a `fluentd-request-ack` option in - `HostConfig.LogConfig.Config` when using the Fluentd logging driver. If enabled, - the Fluentd logging driver sends the chunk option with a unique ID. The server - will respond with an acknowledgement. This option improves the reliability of - the message transmission. This change is not versioned, and affects all API - versions if the daemon has this patch. -* `POST /containers/create`, `GET /containers/{id}/json`, and `GET /containers/json` now supports - `BindOptions.NonRecursive`. -* `POST /swarm/init` now accepts a `DataPathPort` property to set data path port number. -* `GET /info` now returns information about `DataPathPort` that is currently used in swarm -* `GET /info` now returns `PidsLimit` boolean to indicate if the host kernel has - PID limit support enabled. -* `GET /info` now includes `name=rootless` in `SecurityOptions` when the daemon is running in - rootless mode. This change is not versioned, and affects all API versions if the daemon has - this patch. -* `GET /info` now returns `none` as `CgroupDriver` when the daemon is running in rootless mode. - This change is not versioned, and affects all API versions if the daemon has this patch. -* `POST /containers/create` now accepts `DeviceRequests` as part of `HostConfig`. - Can be used to set Nvidia GPUs. 
-* The `GET /swarm` endpoint now returns `DataPathPort` information.
-* `POST /containers/create` now takes a `KernelMemoryTCP` field to set a hard limit for kernel TCP buffer memory.
-* `GET /services` now returns `MaxReplicas` as part of the `Placement`.
-* `GET /services/{id}` now returns `MaxReplicas` as part of the `Placement`.
-* `POST /services/create` and `POST /services/(id or name)/update` now take the field `MaxReplicas`
-  as part of the service `Placement`, allowing you to specify the maximum number of replicas per node for the service.
-* `POST /containers/create` on Linux now creates a container with `HostConfig.IpcMode=private`
-  by default, if `IpcMode` is not explicitly specified. The per-daemon default can be changed
-  back to `shareable` by using the `DefaultIpcMode` daemon configuration parameter.
-* `POST /containers/{id}/update` now accepts a `PidsLimit` field to tune a container's
-  PID limit. Set `0` or `-1` for unlimited. Leave `null` to not change the current value.
-* `POST /build` now accepts an `outputs` key for configuring build outputs when using BuildKit mode.
-
-## v1.39 API changes
-
-[Docker Engine API v1.39](https://docs.docker.com/reference/api/engine/version/v1.39/) documentation
-
-* `GET /info` now returns an empty string, instead of `<unknown>`, for `KernelVersion`
-  and `OperatingSystem` if the daemon was unable to obtain this information.
-* `GET /info` now returns information about the product license, if a license
-  has been applied to the daemon.
-* `GET /info` now returns a `Warnings` field, containing warnings and informational
-  messages about missing features, or issues related to the daemon configuration.
-* `POST /swarm/init` now accepts a `DefaultAddrPool` property to set the global scope default address pool.
-* `POST /swarm/init` now accepts a `SubnetSize` property to set global scope networks by giving the
-  length of the subnet masks for every such network.
-* `POST /session` (added in [v1.31](#v131-api-changes)) is no longer experimental.
-  This endpoint can be used to run interactive long-running protocols between the
-  client and the daemon.
-
-## v1.38 API changes
-
-[Docker Engine API v1.38](https://docs.docker.com/reference/api/engine/version/v1.38/) documentation
-
-* `GET /tasks` and `GET /tasks/{id}` now return a `NetworkAttachmentSpec` field,
-  containing the `ContainerID` for non-service containers connected to "attachable"
-  swarm-scoped networks.
-
-## v1.37 API changes
-
-[Docker Engine API v1.37](https://docs.docker.com/reference/api/engine/version/v1.37/) documentation
-
-* `POST /containers/create` and `POST /services/create` now support exposing SCTP ports.
-* `POST /configs/create` and `POST /configs/{id}/create` now accept a `Templating` driver.
-* `GET /configs` and `GET /configs/{id}` now return the `Templating` driver of the config.
-* `POST /secrets/create` and `POST /secrets/{id}/create` now accept a `Templating` driver.
-* `GET /secrets` and `GET /secrets/{id}` now return the `Templating` driver of the secret.
-
-## v1.36 API changes
-
-[Docker Engine API v1.36](https://docs.docker.com/reference/api/engine/version/v1.36/) documentation
-
-* `GET /events` now returns an `exec_die` event when an exec process terminates.
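As a hedged illustration (not part of the original changelog), the `exec_die` event added in v1.36 can be observed by streaming the events endpoint over the local daemon socket; the version pin and the socket path are assumptions:

```bash
# Stream daemon events and watch for exec_die, which is emitted
# when an exec process terminates (API v1.36+).
curl --silent --no-buffer --unix-socket /var/run/docker.sock \
  "http://localhost/v1.36/events" \
  | grep --line-buffered 'exec_die'
```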
-
-
-## v1.35 API changes
-
-[Docker Engine API v1.35](https://docs.docker.com/reference/api/engine/version/v1.35/) documentation
-
-* `POST /services/create` and `POST /services/(id)/update` now accept an
-  `Isolation` field on the container spec to set the isolation technology of the
-  containers running the service (`default`, `process`, or `hyperv`). This
-  configuration is only used for Windows containers.
-* `GET /containers/(name)/logs` now supports an additional query parameter: `until`,
-  which returns log lines that occurred before the specified timestamp.
-* `POST /containers/{id}/exec` now accepts a `WorkingDir` property to set the
-  working directory for the exec process, independent of the container's working directory.
-* `GET /version` now returns a `Platform.Name` field, which can be used by products
-  using Moby as a foundation to return information about the platform.
-* `GET /version` now returns a `Components` field, which can be used to return
-  information about the components used. Information about the engine itself is
-  now included as a "Component" version, and contains all information from the
-  top-level `Version`, `GitCommit`, `APIVersion`, `MinAPIVersion`, `GoVersion`,
-  `Os`, `Arch`, `BuildTime`, `KernelVersion`, and `Experimental` fields. Going
-  forward, the information from the `Components` section is preferred over the
-  top-level counterparts.
-
-## v1.34 API changes
-
-[Docker Engine API v1.34](https://docs.docker.com/reference/api/engine/version/v1.34/) documentation
-
-* `POST /containers/(name)/wait?condition=removed` now also returns
-  in case of container removal failure. A pointer to a structure named
-  `Error` has been added to the response JSON to indicate a failure.
-  If `Error` is `null`, container removal has succeeded; otherwise, the
-  text of an error message indicating why container removal failed
-  is available in the `Error.Message` field.
-
-## v1.33 API changes
-
-[Docker Engine API v1.33](https://docs.docker.com/reference/api/engine/version/v1.33/) documentation
-
-* `GET /events` now supports filtering 4 more kinds of events: `config`, `node`,
-  `secret` and `service`.
-
-## v1.32 API changes
-
-[Docker Engine API v1.32](https://docs.docker.com/reference/api/engine/version/v1.32/) documentation
-
-* `POST /images/create` now accepts a `platform` parameter in the form of `os[/arch[/variant]]`.
-* `POST /containers/create` now accepts additional values for the
-  `HostConfig.IpcMode` property. New values are `private`, `shareable`,
-  and `none`.
-* `DELETE /networks/{id or name}` fixed an issue where a `name` equal to another
-  network's name was able to mask that `id`. If both a network with the given
-  _name_ and a network with the given _id_ exist, the network with the given
-  _id_ is now deleted. This change is not versioned, and affects all API versions
-  if the daemon has this patch.
-
-## v1.31 API changes
-
-[Docker Engine API v1.31](https://docs.docker.com/reference/api/engine/version/v1.31/) documentation
-
-* `DELETE /secrets/(name)` now returns status code 404 instead of 500 when the secret does not exist.
-* `POST /secrets/create` now returns status code 409 instead of 500 when creating an already existing secret.
-* `POST /secrets/create` now accepts a `Driver` struct, allowing the
-  `Name` and driver-specific `Options` to be passed to store secrets
-  in an external secrets store. The `Driver` property can be omitted
-  if the default (internal) secrets store is used.
-* `GET /secrets/(id)` and `GET /secrets` now return a `Driver` struct, - containing the `Name` and driver-specific `Options` of the external - secrets store used to store the secret. The `Driver` property is - omitted if no external store is used. -* `POST /secrets/(name)/update` now returns status code 400 instead of 500 when updating a secret's content which is not the labels. -* `POST /nodes/(name)/update` now returns status code 400 instead of 500 when demoting last node fails. -* `GET /networks/(id or name)` now takes an optional query parameter `scope` that will filter the network based on the scope (`local`, `swarm`, or `global`). -* `POST /session` is a new endpoint that can be used for running interactive long-running protocols between client and - the daemon. This endpoint is experimental and only available if the daemon is started with experimental features - enabled. -* `GET /images/(name)/get` now includes an `ImageMetadata` field which contains image metadata that is local to the engine and not part of the image config. -* `POST /services/create` now accepts a `PluginSpec` when `TaskTemplate.Runtime` is set to `plugin` -* `GET /events` now supports config events `create`, `update` and `remove` that are emitted when users create, update or remove a config -* `GET /volumes/` and `GET /volumes/{name}` now return a `CreatedAt` field, - containing the date/time the volume was created. This field is omitted if the - creation date/time for the volume is unknown. For volumes with scope "global", - this field represents the creation date/time of the local _instance_ of the - volume, which may differ from instances of the same volume on different nodes. -* `GET /system/df` now returns a `CreatedAt` field for `Volumes`. Refer to the - `/volumes/` endpoint for a description of this field. - -## v1.30 API changes - -[Docker Engine API v1.30](https://docs.docker.com/reference/api/engine/version/v1.30/) documentation - -* `GET /info` now returns the list of supported logging drivers, including plugins. -* `GET /info` and `GET /swarm` now returns the cluster-wide swarm CA info if the node is in a swarm: the cluster root CA certificate, and the cluster TLS - leaf certificate issuer's subject and public key. It also displays the desired CA signing certificate, if any was provided as part of the spec. -* `POST /build/` now (when not silent) produces an `Aux` message in the JSON output stream with payload `types.BuildResult` for each image produced. The final such message will reference the image resulting from the build. -* `GET /nodes` and `GET /nodes/{id}` now returns additional information about swarm TLS info if the node is part of a swarm: the trusted root CA, and the - issuer's subject and public key. -* `GET /distribution/(name)/json` is a new endpoint that returns a JSON output stream with payload `types.DistributionInspect` for an image name. It includes a descriptor with the digest, and supported platforms retrieved from directly contacting the registry. -* `POST /swarm/update` now accepts 3 additional parameters as part of the swarm spec's CA configuration; the desired CA certificate for - the swarm, the desired CA key for the swarm (if not using an external certificate), and an optional parameter to force swarm to - generate and rotate to a new CA certificate/key pair. -* `POST /service/create` and `POST /services/(id or name)/update` now take the field `Platforms` as part of the service `Placement`, allowing to specify platforms supported by the service. 
-* `POST /containers/(name)/wait` now accepts a `condition` query parameter to indicate which state change condition to wait for. Also, response headers are now returned immediately to acknowledge that the server has registered a wait callback for the client.
-* `POST /swarm/init` now accepts a `DataPathAddr` property to set the IP address or network interface to use for data traffic.
-* `POST /swarm/join` now accepts a `DataPathAddr` property to set the IP address or network interface to use for data traffic.
-* `GET /events` now supports service, node, and secret events, which are emitted when users create, update, or remove services, nodes, and secrets.
-* `GET /events` now supports a network remove event, which is emitted when users remove a swarm-scoped network.
-* `GET /events` now supports a `scope` filter type with supported values `swarm` and `local`.
-* `PUT /containers/(name)/archive` now accepts a `copyUIDGID` parameter to allow copying UID/GID maps to the destination file or directory.
-
-## v1.29 API changes
-
-[Docker Engine API v1.29](https://docs.docker.com/reference/api/engine/version/v1.29/) documentation
-
-* `DELETE /networks/(name)` now allows removing the ingress network, which is used to provide the routing mesh.
-* `POST /networks/create` now supports creating the ingress network by specifying an `Ingress` boolean field. As of now, this is supported only when using the overlay network driver.
-* `GET /networks/(name)` now returns an `Ingress` field showing whether the network is the ingress one.
-* `GET /networks/` now supports a `scope` filter to filter networks based on the network mode (`swarm`, `global`, or `local`).
-* `POST /containers/create`, `POST /services/create` and `POST /services/(id or name)/update` now take the field `StartPeriod` as part of the `HealthConfig`, allowing for specification of a period during which the container should not be considered unhealthy even if health checks do not pass.
-* `GET /services/(id)` now accepts an `insertDefaults` query-parameter to merge default values into the service inspect output.
-* `POST /containers/prune`, `POST /images/prune`, `POST /volumes/prune`, and `POST /networks/prune` now support a `label` filter to filter containers, images, volumes, or networks based on the label. The format of the label filter can be `label=<key>`/`label=<key>=<value>` to remove those with the specified labels, or `label!=<key>`/`label!=<key>=<value>` to remove those without the specified labels.
-* `POST /services/create` now accepts `Privileges` as part of `ContainerSpec`. Privileges currently include
-  `CredentialSpec` and `SELinuxContext`.
-
-## v1.28 API changes
-
-[Docker Engine API v1.28](https://docs.docker.com/reference/api/engine/version/v1.28/) documentation
-
-* `POST /containers/create` now includes a `Consistency` field to specify the consistency level for each `Mount`, with possible values `default`, `consistent`, `cached`, or `delegated`.
-* `POST /containers/create` now takes a `DeviceCgroupRules` field in `HostConfig`, allowing you to set custom device cgroup rules for the created container.
-* The optional query parameter `verbose` for `GET /networks/(id or name)` will now list all services with all the tasks, including the non-local tasks on the given network.
-* `GET /containers/(id or name)/attach/ws` now returns WebSocket in binary frame format for API version >= v1.28, and returns WebSocket in text frame format for API version < v1.28, for the purpose of backward compatibility.
-* `GET /networks` is optimised only to return list of all networks and network specific information. List of all containers attached to a specific network is removed from this API and is only available using the network specific `GET /networks/{network-id}`. -* `GET /containers/json` now supports `publish` and `expose` filters to filter containers that expose or publish certain ports. -* `POST /services/create` and `POST /services/(id or name)/update` now accept the `ReadOnly` parameter, which mounts the container's root filesystem as read only. -* `POST /build` now accepts `extrahosts` parameter to specify a host to ip mapping to use during the build. -* `POST /services/create` and `POST /services/(id or name)/update` now accept a `rollback` value for `FailureAction`. -* `POST /services/create` and `POST /services/(id or name)/update` now accept an optional `RollbackConfig` object which specifies rollback options. -* `GET /services` now supports a `mode` filter to filter services based on the service mode (either `global` or `replicated`). -* `POST /containers/(name)/update` now supports updating `NanoCpus` that represents CPU quota in units of 10<sup>-9</sup> CPUs. -* `POST /plugins/{name}/disable` now accepts a `force` query-parameter to disable a plugin even if still in use. - -## v1.27 API changes - -[Docker Engine API v1.27](https://docs.docker.com/reference/api/engine/version/v1.27/) documentation - -* `GET /containers/(id or name)/stats` now includes an `online_cpus` field in both `precpu_stats` and `cpu_stats`. If this field is `nil` then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used. - -## v1.26 API changes - -[Docker Engine API v1.26](https://docs.docker.com/reference/api/engine/version/v1.26/) documentation - -* `POST /plugins/(plugin name)/upgrade` upgrade a plugin. - -## v1.25 API changes - -[Docker Engine API v1.25](https://docs.docker.com/reference/api/engine/version/v1.25/) documentation - -* The API version is now required in all API calls. Instead of just requesting, for example, the URL `/containers/json`, you must now request `/v1.25/containers/json`. -* `GET /version` now returns `MinAPIVersion`. -* `POST /build` accepts `networkmode` parameter to specify network used during build. -* `GET /images/(name)/json` now returns `OsVersion` if populated -* `GET /images/(name)/json` no longer contains the `RootFS.BaseLayer` field. This - field was used for Windows images that used a base-image that was pre-installed - on the host (`RootFS.Type` `layers+base`), which is no longer supported, and - the `RootFS.BaseLayer` field has been removed. -* `GET /info` now returns `Isolation`. -* `POST /containers/create` now takes `AutoRemove` in HostConfig, to enable auto-removal of the container on daemon side when the container's process exits. -* `GET /containers/json` and `GET /containers/(id or name)/json` now return `"removing"` as a value for the `State.Status` field if the container is being removed. Previously, "exited" was returned as status. -* `GET /containers/json` now accepts `removing` as a valid value for the `status` filter. -* `GET /containers/json` now supports filtering containers by `health` status. -* `DELETE /volumes/(name)` now accepts a `force` query parameter to force removal of volumes that were already removed out of band by the volume driver plugin. -* `POST /containers/create/` and `POST /containers/(name)/update` now validates restart policies. 
-* `POST /containers/create` now validates IPAMConfig in NetworkingConfig, and returns error for invalid IPv4 and IPv6 addresses (`--ip` and `--ip6` in `docker create/run`). -* `POST /containers/create` now takes a `Mounts` field in `HostConfig` which replaces `Binds`, `Volumes`, and `Tmpfs`. *note*: `Binds`, `Volumes`, and `Tmpfs` are still available and can be combined with `Mounts`. -* `POST /build` now performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. Note that this change is _unversioned_ and applied to all API versions. -* `POST /build` accepts `cachefrom` parameter to specify images used for build cache. -* `GET /networks/` endpoint now correctly returns a list of *all* networks, - instead of the default network if a trailing slash is provided, but no `name` - or `id`. -* `DELETE /containers/(name)` endpoint now returns an error of `removal of container name is already in progress` with status code of 400, when container name is in a state of removal in progress. -* `GET /containers/json` now supports a `is-task` filter to filter - containers that are tasks (part of a service in swarm mode). -* `POST /containers/create` now takes `StopTimeout` field. -* `POST /services/create` and `POST /services/(id or name)/update` now accept `Monitor` and `MaxFailureRatio` parameters, which control the response to failures during service updates. -* `POST /services/(id or name)/update` now accepts a `ForceUpdate` parameter inside the `TaskTemplate`, which causes the service to be updated even if there are no changes which would ordinarily trigger an update. -* `POST /services/create` and `POST /services/(id or name)/update` now return a `Warnings` array. -* `GET /networks/(name)` now returns field `Created` in response to show network created time. -* `POST /containers/(id or name)/exec` now accepts an `Env` field, which holds a list of environment variables to be set in the context of the command execution. -* `GET /volumes`, `GET /volumes/(name)`, and `POST /volumes/create` now return the `Options` field which holds the driver specific options to use for when creating the volume. -* `GET /exec/(id)/json` now returns `Pid`, which is the system pid for the exec'd process. -* `POST /containers/prune` prunes stopped containers. -* `POST /images/prune` prunes unused images. -* `POST /volumes/prune` prunes unused volumes. -* `POST /networks/prune` prunes unused networks. -* Every API response now includes a `Docker-Experimental` header specifying if experimental features are enabled (value can be `true` or `false`). -* Every API response now includes a `API-Version` header specifying the default API version of the server. -* The `hostConfig` option now accepts the fields `CpuRealtimePeriod` and `CpuRtRuntime` to allocate cpu runtime to rt tasks when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel. -* The `SecurityOptions` field within the `GET /info` response now includes `userns` if user namespaces are enabled in the daemon. -* `GET /nodes` and `GET /node/(id or name)` now return `Addr` as part of a node's `Status`, which is the address that that node connects to the manager from. -* The `HostConfig` field now includes `NanoCpus` that represents CPU quota in units of 10<sup>-9</sup> CPUs. -* `GET /info` now returns more structured information about security options. -* The `HostConfig` field now includes `CpuCount` that represents the number of CPUs available for execution by the container. Windows daemon only. 
-* `POST /services/create` and `POST /services/(id or name)/update` now accept the `TTY` parameter, which allocates a pseudo-TTY in the container.
-* `POST /services/create` and `POST /services/(id or name)/update` now accept the `DNSConfig` parameter, which specifies DNS-related configuration in the resolver configuration file (`resolv.conf`) through `Nameservers`, `Search`, and `Options`.
-* `POST /services/create` and `POST /services/(id or name)/update` now support
-  `node.platform.arch` and `node.platform.os` constraints in the service's
-  `TaskSpec.Placement.Constraints` field.
-* `GET /networks/(id or name)` now includes the IP and name of all peer nodes for swarm mode overlay networks.
-* `GET /plugins` lists plugins.
-* `POST /plugins/pull?name=<plugin name>` pulls a plugin.
-* `GET /plugins/(plugin name)` inspects a plugin.
-* `POST /plugins/(plugin name)/set` configures a plugin.
-* `POST /plugins/(plugin name)/enable` enables a plugin.
-* `POST /plugins/(plugin name)/disable` disables a plugin.
-* `POST /plugins/(plugin name)/push` pushes a plugin.
-* `POST /plugins/create?name=(plugin name)` creates a plugin.
-* `DELETE /plugins/(plugin name)` deletes a plugin.
-* `POST /node/(id or name)/update` now accepts either `id` or `name` to identify the node to update.
-* `GET /images/json` now supports a `reference` filter.
-* `GET /secrets` returns information on the secrets.
-* `POST /secrets/create` creates a secret.
-* `DELETE /secrets/{id}` removes the secret `id`.
-* `GET /secrets/{id}` returns information on the secret `id`.
-* `POST /secrets/{id}/update` updates the secret `id`.
-* `POST /services/(id or name)/update` now accepts a service name or a prefix of the service ID as a parameter.
-* `POST /containers/create` added two built-in log-opts that work on all logging drivers,
-  `mode` (`blocking`|`non-blocking`) and `max-buffer-size` (e.g. `2m`), which enable a non-blocking log buffer.
-* `POST /containers/create` now takes a `HostConfig.Init` field to run an init
-  inside the container that forwards signals and reaps processes.
-
-## v1.24 API changes
-
-[Docker Engine API v1.24](v1.24.md) documentation
-
-* `POST /containers/create` now takes a `StorageOpt` field.
-* `GET /info` now returns a `SecurityOptions` field, showing if `apparmor`, `seccomp`, or `selinux` is supported.
-* `GET /info` no longer returns the `ExecutionDriver` property. This property was no longer used after integration
-  with containerd in Docker 1.11.
-* `GET /networks` now supports filtering by `label` and `driver`.
-* `GET /containers/json` now supports filtering containers by `network` name or ID.
-* `POST /containers/create` now takes `IOMaximumBandwidth` and `IOMaximumIOps` fields. Windows daemon only.
-* `POST /containers/create` now returns an HTTP 400 "bad parameter" message
-  if no command is specified (instead of an HTTP 500 "server error").
-* `GET /images/search` now takes a `filters` query parameter.
-* `GET /events` now supports a `reload` event that is emitted when the daemon configuration is reloaded.
-* `GET /events` now supports filtering by daemon name or ID.
-* `GET /events` now supports a `detach` event that is emitted on detaching from a container process.
-* `GET /events` now supports an `exec_detach` event that is emitted on detaching from an exec process.
-* `GET /images/json` now supports the filters `since` and `before`.
-* `POST /containers/(id or name)/start` no longer accepts a `HostConfig`.
-* `POST /images/(name)/tag` no longer has a `force` query parameter.
-* `GET /images/search` now supports maximum returned search results `limit`. -* `POST /containers/{name:.*}/copy` is now removed and errors out starting from this API version. -* API errors are now returned as JSON instead of plain text. -* `POST /containers/create` and `POST /containers/(id)/start` allow you to configure kernel parameters (sysctls) for use in the container. -* `POST /containers/<container ID>/exec` and `POST /exec/<exec ID>/start` - no longer expects a "Container" field to be present. This property was not used - and is no longer sent by the docker client. -* `POST /containers/create/` now validates the hostname (should be a valid RFC 1123 hostname). -* `POST /containers/create/` `HostConfig.PidMode` field now accepts `container:<name|id>`, - to have the container join the PID namespace of an existing container. - -## v1.23 API changes - -* `GET /containers/json` returns the state of the container, one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`. -* `GET /containers/json` returns the mount points for the container. -* `GET /networks/(name)` now returns an `Internal` field showing whether the network is internal or not. -* `GET /networks/(name)` now returns an `EnableIPv6` field showing whether the network has ipv6 enabled or not. -* `POST /containers/(name)/update` now supports updating container's restart policy. -* `POST /networks/create` now supports enabling ipv6 on the network by setting the `EnableIPv6` field (doing this with a label will no longer work). -* `GET /info` now returns `CgroupDriver` field showing what cgroup driver the daemon is using; `cgroupfs` or `systemd`. -* `GET /info` now returns `KernelMemory` field, showing if "kernel memory limit" is supported. -* `POST /containers/create` now takes `PidsLimit` field, if the kernel is >= 4.3 and the pids cgroup is supported. -* `GET /containers/(id or name)/stats` now returns `pids_stats`, if the kernel is >= 4.3 and the pids cgroup is supported. -* `POST /containers/create` now allows you to override usernamespaces remapping and use privileged options for the container. -* `POST /containers/create` now allows specifying `nocopy` for named volumes, which disables automatic copying from the container path to the volume. -* `POST /auth` now returns an `IdentityToken` when supported by a registry. -* `POST /containers/create` with both `Hostname` and `Domainname` fields specified will result in the container's hostname being set to `Hostname`, rather than `Hostname.Domainname`. -* `GET /volumes` now supports more filters, new added filters are `name` and `driver`. -* `GET /containers/(id or name)/logs` now accepts a `details` query parameter to stream the extra attributes that were provided to the containers `LogOpts`, such as environment variables and labels, with the logs. -* `POST /images/load` now returns progress information as a JSON stream, and has a `quiet` query parameter to suppress progress details. - -## v1.22 API changes - -* The `HostConfig.LxcConf` field has been removed, and is no longer available on - `POST /containers/create` and `GET /containers/(id)/json`. -* `POST /container/(name)/update` updates the resources of a container. -* `GET /containers/json` supports filter `isolation` on Windows. -* `GET /containers/json` now returns the list of networks of containers. -* `GET /info` Now returns `Architecture` and `OSType` fields, providing information - about the host architecture and operating system type that the daemon runs on. 
-* `GET /networks/(name)` now returns a `Name` field for each container attached to the network. -* `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it - consistent with other date/time values returned by the API. -* `AuthConfig` now supports a `registrytoken` for token based authentication -* `POST /containers/create` now has a 4M minimum value limit for `HostConfig.KernelMemory` -* Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create` - will be cancelled if the HTTP connection making the API request is closed before - the push or pull completes. -* `POST /containers/create` now allows you to set a read/write rate limit for a - device (in bytes per second or IO per second). -* `GET /networks` now supports filtering by `name`, `id` and `type`. -* `POST /containers/create` now allows you to set the static IPv4 and/or IPv6 address for the container. -* `POST /networks/(id)/connect` now allows you to set the static IPv4 and/or IPv6 address for the container. -* `GET /info` now includes the number of containers running, stopped, and paused. -* `POST /networks/create` now supports restricting external access to the network by setting the `Internal` field. -* `POST /networks/(id)/disconnect` now includes a `Force` option to forcefully disconnect a container from network -* `GET /containers/(id)/json` now returns the `NetworkID` of containers. -* `POST /networks/create` Now supports an options field in the IPAM config that provides options - for custom IPAM plugins. -* `GET /networks/{network-id}` Now returns IPAM config options for custom IPAM plugins if any - are available. -* `GET /networks/<network-id>` now returns subnets info for user-defined networks. -* `GET /info` can now return a `SystemStatus` field useful for returning additional information about applications - that are built on top of engine. - -## v1.21 API changes - -* `GET /volumes` lists volumes from all volume drivers. -* `POST /volumes/create` to create a volume. -* `GET /volumes/(name)` get low-level information about a volume. -* `DELETE /volumes/(name)` remove a volume with the specified name. -* `VolumeDriver` was moved from `config` to `HostConfig` to make the configuration portable. -* `GET /images/(name)/json` now returns information about an image's `RepoTags` and `RepoDigests`. -* The `config` option now accepts the field `StopSignal`, which specifies the signal to use to kill a container. -* `GET /containers/(id)/stats` will return networking information respectively for each interface. -* The `HostConfig` option now includes the `DnsOptions` field to configure the container's DNS options. -* `POST /build` now optionally takes a serialized map of build-time variables. -* `GET /events` now includes a `timenano` field, in addition to the existing `time` field. -* `GET /events` now supports filtering by image and container labels. -* `GET /info` now lists engine version information and return the information of `CPUShares` and `Cpuset`. -* `GET /containers/json` will return `ImageID` of the image used by container. -* `POST /exec/(name)/start` will now return an HTTP 409 when the container is either stopped or paused. -* `POST /containers/create` now takes `KernelMemory` in HostConfig to specify kernel memory limit. -* `GET /containers/(name)/json` now accepts a `size` parameter. Setting this parameter to '1' returns container size information in the `SizeRw` and `SizeRootFs` fields. 
-* `GET /containers/(name)/json` now returns a `NetworkSettings.Networks` field, - detailing network settings per network. This field deprecates the - `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`, - `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which - are still returned for backward-compatibility, but will be removed in a future version. -* `GET /exec/(id)/json` now returns a `NetworkSettings.Networks` field, - detailing network settings per network. This field deprecates the - `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`, - `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which - are still returned for backward-compatibility, but will be removed in a future version. -* The `HostConfig` option now includes the `OomScoreAdj` field for adjusting the - badness heuristic. This heuristic selects which processes the OOM killer kills - under out-of-memory conditions. - -## v1.20 API changes - -* `GET /containers/(id)/archive` get an archive of filesystem content from a container. -* `PUT /containers/(id)/archive` upload an archive of content to be extracted to -an existing directory inside a container's filesystem. -* `POST /containers/(id)/copy` is deprecated in favor of the above `archive` -endpoint which can be used to download files and directories from a container. -* The `hostConfig` option now accepts the field `GroupAdd`, which specifies a -list of additional groups that the container process will run as. - -## v1.19 API changes - -* When the daemon detects a version mismatch with the client, usually when -the client is newer than the daemon, an HTTP 400 is now returned instead -of a 404. -* `GET /containers/(id)/stats` now accepts a `stream` bool to get only one set of stats and disconnect. -* `GET /containers/(id)/logs` now accepts a `since` timestamp parameter. -* `GET /info`: the fields `Debug`, `IPv4Forwarding`, `MemoryLimit`, and -`SwapLimit` are now returned as booleans instead of ints. In addition, the -endpoint now returns the new boolean fields `CpuCfsPeriod`, `CpuCfsQuota`, and -`OomKillDisable`. -* The `hostConfig` option now accepts the fields `CpuPeriod` and `CpuQuota`. -* `POST /build` accepts `cpuperiod` and `cpuquota` options. - -## v1.18 API changes - -* `GET /version` now returns `Os`, `Arch` and `KernelVersion`. -* `POST /containers/create` and `POST /containers/(id)/start` allow you to set ulimit settings for use in the container. -* `GET /info` now returns `SystemTime`, `HttpProxy`, `HttpsProxy` and `NoProxy`. -* `GET /images/json` added a `RepoDigests` field to include image digest information. -* `POST /build` can now set resource constraints for all containers created for the build. -* `CgroupParent` can be passed in the host config to set up container cgroups under a specific cgroup. -* `POST /build`: closing the HTTP request cancels the build. -* `POST /containers/(id)/exec` now includes a `Warnings` field in the response.
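The request patterns behind several of the entries above are easier to see with a concrete call. A minimal sketch, assuming a daemon listening on the default Unix socket and a running container named `mycontainer` (the socket path, container name, and version prefixes are assumptions for illustration, not part of the changelog):

```bash
# Illustrative only: talk to the local Engine API over the default Unix socket.
# "mycontainer" and the API version prefixes are assumptions, not fixed values.

# v1.20: download /etc from a container as a tar archive (GET /containers/{id}/archive).
curl --unix-socket /var/run/docker.sock \
  -o etc.tar \
  "http://localhost/v1.20/containers/mycontainer/archive?path=/etc"

# v1.23: stream stdout logs together with the extra attributes supplied via LogOpts.
curl --unix-socket /var/run/docker.sock \
  "http://localhost/v1.23/containers/mycontainer/logs?stdout=1&details=1"
```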
diff --git a/_vendor/modules.txt b/_vendor/modules.txt index 5b934fd1d8d..eed74d6eeaa 100644 --- a/_vendor/modules.txt +++ b/_vendor/modules.txt @@ -1,6 +1,6 @@ -# github.com/moby/moby v28.1.0-rc.2+incompatible -# github.com/moby/buildkit v0.22.0-rc1 -# github.com/docker/buildx v0.23.0 -# github.com/docker/cli v28.1.1+incompatible -# github.com/docker/compose/v2 v2.36.0 -# github.com/docker/scout-cli v1.15.0 +# github.com/moby/moby/api v1.54.1 +# github.com/moby/buildkit v0.29.0 +# github.com/docker/buildx v0.33.0 +# github.com/docker/cli v29.4.0+incompatible +# github.com/docker/compose/v5 v5.1.2 +# github.com/docker/model-runner v1.1.36 diff --git a/assets/css/code.css b/assets/css/code.css deleted file mode 100644 index fa4bb4bd34b..00000000000 --- a/assets/css/code.css +++ /dev/null @@ -1,81 +0,0 @@ -@layer components { - .prose { - .highlight, - :not(pre) > code { - font-size: 0.875em; - border: 1px solid; - border-radius: theme("spacing.1"); - background: theme("colors.white"); - border-color: theme("colors.gray.light.300"); - .dark & { - background: theme("colors.gray.dark.200"); - border-color: theme("colors.gray.dark.300"); - } - } - - :not(pre) > code { - background: theme("colors.gray.light.200"); - display: inline-block; - margin: 0; - font-weight: 400; - overflow-wrap: anywhere; - padding: 0 4px; - } - - table:not(.lntable) code { - overflow-wrap: unset; - white-space: nowrap; - } - - /* Indented code blocks */ - pre:not(.chroma) { - @apply my-4 overflow-x-auto p-3; - font-size: 0.875em; - border: 1px solid; - border-radius: theme("spacing.1"); - background: theme("colors.white"); - border-color: theme("colors.gray.light.300"); - .dark & { - background: theme("colors.gray.dark.200"); - border-color: theme("colors.gray.dark.300"); - } - } - - .highlight { - @apply my-4 overflow-x-auto p-3; - - /* LineTableTD */ - .lntd { - vertical-align: top; - padding: 0; - margin: 0; - font-weight: 400; - padding: 0 4px; - &:first-child { - width: 0; - } - } - - /* LineTableTD */ - .lntd { - vertical-align: top; - padding: 0; - margin: 0; - border: 0; - } - /* LineTable */ - .lntable { - display: table; - width: 100%; - border-spacing: 0; - padding: 0; - margin: 0; - border: 0; - /* LineNumberColumnHighlight */ - .lntd:first-child .hl { - display: block; - } - } - } - } -} diff --git a/assets/css/components.css b/assets/css/components.css new file mode 100644 index 00000000000..a0f711f2dd8 --- /dev/null +++ b/assets/css/components.css @@ -0,0 +1,130 @@ +@layer components { + .card { + @apply mt-2 mb-2 flex flex-col gap-2 rounded-sm border border-gray-200 p-3; + @apply dark:border-gray-700 dark:bg-gray-900; + @apply transition-shadow duration-200; + &:hover, + &:focus { + @apply border-gray-300 dark:border-gray-600; + } + } + .card-link:hover { + @apply !no-underline; + } + .card-header { + @apply mb-2 flex items-center gap-2; + @apply text-gray-700 dark:text-gray-100; + } + .card-icon { + @apply text-gray-700 dark:text-gray-100; + } + .card-img, + .card-img svg { + @apply m-0 flex max-h-5 min-h-5 max-w-5 min-w-5 items-center justify-center fill-current; + } + .card-title { + @apply font-semibold; + } + .card-link { + @apply block text-inherit no-underline hover:underline; + } + .card-description { + @apply text-gray-600; + @apply dark:text-gray-300; + } + + .admonition { + @apply relative mb-4 flex w-full flex-col items-start gap-3 rounded-sm px-6 py-4; + @apply bg-gray-50 dark:bg-gray-900; + } + .admonition-header { + @apply flex flex-wrap items-center gap-2; + } + .admonition-title { + 
@apply font-semibold; + } + .admonition-content { + @apply w-full min-w-0 flex-1 flex-wrap overflow-x-auto break-words; + color: var(--tw-prose-body); + } + .admonition-note { + @apply border-blue-400 bg-blue-50 text-blue-900; + @apply dark:border-blue-600 dark:bg-blue-950 dark:text-blue-100; + } + .admonition-tip { + @apply border-green-400 bg-green-100 text-green-900; + @apply dark:border-green-600 dark:bg-green-950 dark:text-green-100; + } + .admonition-warning { + @apply border-yellow-400 bg-yellow-50 text-yellow-900; + @apply dark:border-yellow-600 dark:bg-yellow-950 dark:text-yellow-100; + } + .admonition-danger { + @apply border-red-400 bg-red-50 text-red-900; + @apply dark:border-red-600 dark:bg-red-950 dark:text-red-100; + } + .admonition-important { + @apply border-purple-400 bg-purple-50 text-purple-900; + @apply dark:border-purple-600 dark:bg-purple-950 dark:text-purple-100; + } + .admonition-icon { + @apply flex-shrink-0; + width: 24px; + height: 24px; + min-width: 24px; + min-height: 24px; + display: flex; + align-items: center; + justify-content: center; + } + .admonition p{ + margin-bottom: 1em; + } + .admonition ul{ + @apply list-disc pl-5 mb-1; + } + + .download-links { + @apply my-0 text-gray-600 dark:text-gray-400 rounded-sm border-1 border-gray-100 bg-gray-100/10 px-2 py-1; + @apply dark:border-gray-800 dark:bg-gray-900; + font-size: 86%; + } + + .download-links a { + @apply link; + } + + .download-links-subcontainer { + @apply flex flex-row gap-2 justify-between; + ul{ + @apply m-0 p-0 list-none; + li{ + @apply p-0 m-0; + } + } + } + + .card-image { + @apply h-12 w-12 overflow-hidden; + } + + .button { + @apply my-2 mr-2 inline-block rounded-sm bg-blue-500 p-1 px-3 text-white hover:bg-blue-600 dark:bg-blue-500 hover:dark:bg-blue-400; + } + + .summary-bar { + @apply my-1 mt-4 flex flex-col rounded-sm border-1 border-gray-100 bg-gray-50 p-4 dark:border-gray-800 dark:bg-gray-900; + } + + .tabs { + @apply bg-blue/2 rounded-sm p-2; + } + .tablist { + @apply mb-1 border-b border-gray-100 dark:border-gray-800; + } + + .tab-item { + @apply inline-block rounded-t-sm px-3 py-2 hover:bg-gray-100 dark:hover:bg-gray-900; + @apply dark:text-gray-200; + } +} \ No newline at end of file diff --git a/assets/css/global.css b/assets/css/global.css index fa6742830e8..ba29b4dc6d1 100644 --- a/assets/css/global.css +++ b/assets/css/global.css @@ -1,89 +1,110 @@ /* global styles */ -@layer base { - [x-cloak=""] { +[x-cloak=""] { + display: none !important; +} +/* alpine cloak for small screens only */ +[x-cloak="sm"] { + @media (width <= 768px) { display: none !important; } - /* alpine cloak for small screens only */ - [x-cloak="sm"] { - @media (width <= 768px) { - display: none !important; - } - } +} +/* Theme toggle icon visibility — driven by data-theme-preference on <html>, + set synchronously by theme.js before first paint. x-show handles updates + after Alpine initialises; these rules cover the pre-Alpine window. 
*/ +:root[data-theme-preference="light"] .theme-icon-moon, +:root[data-theme-preference="light"] .theme-icon-auto, +:root[data-theme-preference="dark"] .theme-icon-sun, +:root[data-theme-preference="dark"] .theme-icon-auto, +:root[data-theme-preference="auto"] .theme-icon-sun, +:root[data-theme-preference="auto"] .theme-icon-moon { + display: none; +} - :root { - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; +:root { + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; - scrollbar-color: theme(colors.gray.light.400) theme(colors.black / 0.05); - &.dark { - scrollbar-color: theme(colors.gray.dark.800) theme(colors.white / 0.10); - } + scrollbar-color: var(--color-gray-400) rgba(0, 0, 0, 0.05); + &.dark { + scrollbar-color: var(--color-gray-700) rgba(255, 255, 255, 0.1); } +} - mark { - @apply bg-transparent font-bold text-blue-light dark:text-blue-dark; - } +mark { + @apply bg-transparent font-bold text-blue-500 dark:text-blue-400; +} - /* Hide the clear (X) button for search inputs */ - /* Chrome, Safari, Edge, and Opera */ - input[type="search"]::-webkit-search-cancel-button { - -webkit-appearance: none; - appearance: none; - } - - /* Firefox */ - input[type="search"]::-moz-search-cancel-button { - display: none; - } - - /* Internet Explorer and Edge (legacy) */ - input[type="search"]::-ms-clear { - display: none; - } +/* Hide the clear (X) button for search inputs */ +/* Chrome, Safari, Edge, and Opera */ +input[type="search"]::-webkit-search-cancel-button { + -webkit-appearance: none; + appearance: none; } -/* utility classes */ +/* Firefox */ +input[type="search"]::-moz-search-cancel-button { + display: none; +} -@layer utilities { - .link { - @apply text-blue-light underline underline-offset-2 dark:text-blue-dark; +/* Internet Explorer and Edge (legacy) */ +input[type="search"]::-ms-clear { + display: none; +} +.prose { + hr { + @apply mt-8 mb-4; } - - .invertible { - @apply dark:hue-rotate-180 dark:invert dark:filter; + :where(h1):not(:where([class~="not-prose"], [class~="not-prose"] *)) { + font-weight: 500 !important; + font-size: 180% !important; + margin-bottom: 0.4em !important; } - - .bg-pattern-blue { - background-color: theme(colors.white / 50%); - background-image: url('/assets/images/bg-pattern-blue.webp'); - background-blend-mode: overlay; - background-size: cover; - background-repeat: none; - .dark & { - background-color: theme(colors.black / 70%); + > h2 { + @apply mt-7! 
mb-3!; + font-size: 160% !important; + a { + @apply hover:no-underline!; } } - - .bg-pattern-purple { - background-color: theme(colors.white / 50%); - background-image: url('/assets/images/bg-pattern-purple.webp'); - background-blend-mode: overlay; - background-size: cover; - background-repeat: none; - .dark & { - background-color: theme(colors.black / 70%); + > h3 { + font-size: 130% !important; + a { + @apply hover:no-underline!; } } - - .bg-pattern-verde { - background-color: theme(colors.white / 50%); - background-image: url('/assets/images/bg-pattern-verde.webp'); - background-blend-mode: overlay; - background-size: cover; - background-repeat: none; - .dark & { - background-color: theme(colors.black / 70%); + > h4 { + a { + @apply hover:no-underline!; + } + } + > h5 { + a { + @apply hover:no-underline!; } } + ol { + list-style-type: decimal; + } + + ol ol { + list-style-type: lower-alpha; + } + + ol ol ol { + list-style-type: lower-roman; + } +} +.navbar-group:first-of-type { + margin-top: 0.2rem !important; +} + +#search-page-results { + mark:where(.dark, .dark *) { + color: var(--color-blue-400); + } +} + +code { + font-size: 0.9em; } diff --git a/assets/css/icons.css b/assets/css/icons.css deleted file mode 100644 index 08428273b26..00000000000 --- a/assets/css/icons.css +++ /dev/null @@ -1,29 +0,0 @@ -@layer utilities { - .icon-svg { - svg { - font-size: 24px; - width: 1em; - height: 1em; - display: inline-block; - fill: currentColor; - } - } - - .icon-xs { - svg { - font-size: 12px; - } - } - - .icon-sm { - svg { - font-size: 16px; - } - } - - .icon-lg { - svg { - font-size: 32px; - } - } -} diff --git a/assets/css/lists.css b/assets/css/lists.css deleted file mode 100644 index 249f71f4f2a..00000000000 --- a/assets/css/lists.css +++ /dev/null @@ -1,12 +0,0 @@ -.prose ol { - list-style-type: decimal; -} - -.prose ol ol { - list-style-type: lower-alpha; -} - -.prose ol ol ol { - list-style-type: lower-roman; -} - diff --git a/assets/css/pagefind.css b/assets/css/pagefind.css new file mode 100644 index 00000000000..a6619c3b104 --- /dev/null +++ b/assets/css/pagefind.css @@ -0,0 +1,25 @@ +/* Pagefind Component UI Customizations */ + +/* Dark mode variables for modal */ +.dark pagefind-modal { + --pf-text: var(--color-gray-100); + --pf-text-secondary: var(--color-gray-300); + --pf-text-muted: var(--color-gray-400); + --pf-background: var(--color-gray-900); + --pf-border: var(--color-gray-700); + --pf-border-focus: var(--color-blue-400); + --pf-hover: var(--color-gray-800); +} + +/* Highlight marks in results */ +pagefind-results mark { + background-color: var(--color-yellow-200); + color: inherit; + padding: 0 0.125rem; + border-radius: 0.125rem; +} + +.dark pagefind-results mark { + background-color: rgba(255, 204, 72, 0.3); + color: white; +} diff --git a/assets/css/style.css b/assets/css/style.css new file mode 100644 index 00000000000..f63fc6f5aec --- /dev/null +++ b/assets/css/style.css @@ -0,0 +1,47 @@ +/* Main CSS entry point */ +@import "tailwindcss"; +@plugin "@tailwindcss/typography"; +@source "hugo_stats.json"; + +@font-face { + font-family: "Roboto Flex"; + src: url("/assets/fonts/RobotoFlex.woff2") format("woff2"); + font-weight: 100 1000; /* Range of weights Roboto Flex supports */ + font-stretch: 100%; /* Range of width Roboto Flex supports */ + font-style: oblique 0deg 10deg; /* Range of oblique angle Roboto Flex supports */ + font-display: fallback; +} + +/* Roboto Mono */ +@font-face { + font-family: "Roboto Mono"; + src: url("/assets/fonts/RobotoMono-Regular.woff2") 
format("woff2"); + font-weight: 100 700; /* Define the range of weight the variable font supports */ + font-style: normal; + font-display: fallback; +} + +/* Roboto Mono Italic */ +@font-face { + font-family: "Roboto Mono"; + src: url("/assets/fonts/RobotoMono-Italic.woff2") format("woff2"); + font-weight: 100 700; /* Define the range of weight the variable font supports */ + font-style: italic; + font-display: fallback; +} + +@layer theme { + @import "theme.css"; +} + +@layer base { + @import "global.css"; +} +@import "utilities.css"; +@import "pagefind.css"; +@import "syntax-dark.css"; +@import "syntax-light.css"; +@import "components.css"; +@import "highlight-github-dark.css"; + +@variant dark (&:where(.dark, .dark *)); diff --git a/assets/css/styles.css b/assets/css/styles.css deleted file mode 100644 index 377c07bcc22..00000000000 --- a/assets/css/styles.css +++ /dev/null @@ -1,16 +0,0 @@ -/* see also: tailwind.config.js */ - -@import "tailwindcss/base"; -@import "/assets/css/global"; -@import "/assets/css/typography"; -@import "/assets/css/hack"; - -@import "tailwindcss/components"; -@import "/assets/css/code"; -@import "/assets/css/toc"; - -@import "tailwindcss/utilities"; -@import "/assets/css/syntax-light"; -@import "/assets/css/syntax-dark"; -@import "/assets/css/icons"; -@import "/assets/css/lists"; diff --git a/assets/css/syntax-dark.css b/assets/css/syntax-dark.css index ff24a195488..e66c18186f6 100644 --- a/assets/css/syntax-dark.css +++ b/assets/css/syntax-dark.css @@ -1,343 +1,337 @@ -@layer utilities { - .syntax-dark { - /* Other */ - .x { - color: theme("colors.white"); - } - /* Error */ - .err { - color: theme("colors.red.dark.500"); - } - /* CodeLine */ - .cl { - } - /* LineHighlight */ - .hl { - min-width: fit-content; - background-color: theme("colors.gray.dark.300"); - } - .lntd:first-child .hl, - & > .chroma > code > .hl { - margin-left: -4px; - border-left: 4px solid theme("colors.gray.dark.400"); - } - /* LineNumbersTable */ - .lnt { - white-space: pre; - user-select: none; - margin-right: 0.4em; - padding: 0 0.4em 0 0.4em; - color: theme("colors.gray.dark.400"); - } - /* LineNumbers */ - .ln { - white-space: pre; - user-select: none; - margin-right: 0.4em; - padding: 0 0.4em 0 0.4em; - color: theme("colors.gray.dark.400"); - } - /* Line */ - .line { - display: flex; - } - /* Keyword */ - .k { - color: theme("colors.amber.dark.700"); - } - /* KeywordConstant */ - .kc { - color: theme("colors.violet.dark.700"); - } - /* KeywordDeclaration */ - .kd { - color: theme("colors.amber.dark.700"); - } - /* KeywordNamespace */ - .kn { - color: theme("colors.amber.dark.700"); - } - /* KeywordPseudo */ - .kp { - color: theme("colors.amber.dark.700"); - } - /* KeywordReserved */ - .kr { - color: theme("colors.amber.dark.700"); - } - /* KeywordType */ - .kt { - color: theme("colors.amber.dark.700"); - } - /* Name */ - .n { - color: theme("colors.violet.dark.700"); - } - /* NameAttribute */ - .na { - color: theme("colors.amber.dark.700"); - } - /* NameBuiltin */ - .nb { - color: theme("colors.amber.dark.700"); - } - /* NameBuiltinPseudo */ - .bp { - color: theme("colors.violet.dark.700"); - } - /* NameClass */ - .nc { - color: theme("colors.white"); - } - /* NameConstant */ - .no { - color: theme("colors.white"); - } - /* NameDecorator */ - .nd { - color: theme("colors.violet.dark.700"); - } - /* NameEntity */ - .ni { - color: theme("colors.amber.dark.700"); - } - /* NameException */ - .ne { - color: theme("colors.red.dark.700"); - } - /* NameFunction */ - .nf { - color: 
theme("colors.blue.dark.600"); - } - /* NameFunctionMagic */ - .fm { - color: theme("colors.blue.dark.600"); - } - /* NameLabel */ - .nl { - color: theme("colors.amber.dark.500"); - } - /* NameNamespace */ - .nn { - color: theme("colors.white"); - } - /* NameOther */ - .nx { - color: theme("colors.white"); - } - /* NameProperty */ - .py { - color: theme("colors.white"); - } - /* NameTag */ - .nt { - color: theme("colors.green.dark.600"); - } - /* NameVariable */ - .nv { - color: theme("colors.white"); - } - /* NameVariableClass */ - .vc { - color: theme("colors.violet.dark.600"); - } - /* NameVariableGlobal */ - .vg { - color: theme("colors.violet.dark.600"); - } - /* NameVariableInstance */ - .vi { - color: theme("colors.violet.dark.600"); - } - /* NameVariableMagic */ - .vm { - color: theme("colors.violet.dark.600"); - } - /* Literal */ - .l { - color: theme("colors.white"); - } - /* LiteralDate */ - .ld { - color: theme("colors.green.dark.600"); - } - /* LiteralString */ - .s { - color: theme("colors.white"); - } - /* LiteralStringAffix */ - .sa { - color: theme("colors.green.dark.600"); - } - /* LiteralStringBacktick */ - .sb { - color: theme("colors.green.dark.600"); - } - /* LiteralStringChar */ - .sc { - color: theme("colors.green.dark.600"); - } - /* LiteralStringDelimiter */ - .dl { - color: theme("colors.green.dark.600"); - } - /* LiteralStringDoc */ - .sd { - color: theme("colors.green.dark.600"); - } - /* LiteralStringDouble */ - .s2 { - color: theme("colors.green.dark.600"); - } - /* LiteralStringEscape */ - .se { - color: theme("colors.white"); - } - /* LiteralStringHeredoc */ - .sh { - color: theme("colors.green.dark.600"); - } - /* LiteralStringInterpol */ - .si { - color: theme("colors.green.dark.600"); - } - /* LiteralStringOther */ - .sx { - color: theme("colors.green.dark.600"); - } - /* LiteralStringRegex */ - .sr { - color: theme("colors.blue.dark.500"); - } - /* LiteralStringSingle */ - .s1 { - color: theme("colors.green.dark.600"); - } - /* LiteralStringSymbol */ - .ss { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumber */ - .m { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumberBin */ - .mb { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumberFloat */ - .mf { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumberHex */ - .mh { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumberInteger */ - .mi { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumberIntegerLong */ - .il { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumberOct */ - .mo { - color: theme("colors.blue.dark.600"); - } - /* Operator */ - .o { - color: theme("colors.blue.dark.700"); - } - /* OperatorWord */ - .ow { - color: theme("colors.amber.dark.700"); - } - /* Punctuation */ - .p { - color: theme("colors.gray.dark.500"); - } - /* Comment */ - .c { - color: theme("colors.gray.dark.500"); - } - /* CommentHashbang */ - .ch { - color: theme("colors.gray.dark.500"); - } - /* CommentMultiline */ - .cm { - color: theme("colors.gray.dark.500"); - } - /* CommentSingle */ - .c1 { - color: theme("colors.gray.dark.500"); - } - /* CommentSpecial */ - .cs { - color: theme("colors.gray.dark.500"); - } - /* CommentPreproc */ - .cp { - color: theme("colors.gray.dark.500"); - } - /* CommentPreprocFile */ - .cpf { - color: theme("colors.gray.dark.500"); - } - /* Generic */ - .g { - color: theme("colors.white"); - } - /* GenericDeleted */ - .gd { - color: theme("colors.red.dark.500"); - } - /* GenericEmph */ - .ge { - color: theme("colors.white"); - 
} - /* GenericError */ - .gr { - color: theme("colors.red.dark.500"); - } - /* GenericHeading */ - .gh { - color: theme("colors.gray.dark.600"); - } - /* GenericInserted */ - .gi { - color: theme("colors.green.dark.500"); - } - /* GenericOutput */ - .go { - color: theme("colors.white"); - } - /* GenericPrompt */ - .gp { - user-select: none; - color: theme("colors.green.dark.400"); - } - /* GenericStrong */ - .gs { - color: theme("colors.white"); - } - /* GenericSubheading */ - .gu { - color: theme("colors.gray.dark.600"); - } - /* GenericTraceback */ - .gt { - color: theme("colors.red.dark.500"); - } - /* GenericUnderline */ - .gl { - color: theme("colors.white"); - text-decoration: underline; - } - /* TextWhitespace */ - .w { - color: theme("colors.gray.dark.100"); - } +@utility syntax-dark { + /* Other */ + .x { + color: var(--color-white-main); + } + /* Error */ + .err { + color: var(--color-red-500); + } + /* CodeLine */ + .cl { + color: var(--color-gray-200); + } + /* LineHighlight */ + .hl { + min-width: fit-content; + background-color: var(--color-gray-800); + } + /* LineNumbersTable */ + .lnt { + white-space: pre; + user-select: none; + margin-right: 0.4em; + padding: 0 0.4em 0 0.4em; + color: var(--color-gray-300); + } + /* LineNumbers */ + .ln { + white-space: pre; + user-select: none; + margin-right: 0.4em; + padding: 0 0.4em 0 0.4em; + color: var(--color-gray-900); + } + /* Line */ + .line { + display: flex; + } + /* Keyword */ + .k { + color: var(--color-yellow-700); + } + /* KeywordConstant */ + .kc { + color: var(--color-violet-300); + } + /* KeywordDeclaration */ + .kd { + color: var(--color-yellow-700); + } + /* KeywordNamespace */ + .kn { + color: var(--color-yellow-700); + } + /* KeywordPseudo */ + .kp { + color: var(--color-yellow-700); + } + /* KeywordReserved */ + .kr { + color: var(--color-yellow-700); + } + /* KeywordType */ + .kt { + color: var(--color-yellow-700); + } + /* Name */ + .n { + color: var(--color-violet-300); + } + /* NameAttribute */ + .na { + color: var(--color-yellow-700); + } + /* NameBuiltin */ + .nb { + color: var(--color-yellow-700); + } + /* NameBuiltinPseudo */ + .bp { + color: var(--color-violet-300); + } + /* NameClass */ + .nc { + color: var(--color-white-main); + } + /* NameConstant */ + .no { + color: var(--color-white-main); + } + /* NameDecorator */ + .nd { + color: var(--color-violet-300); + } + /* NameEntity */ + .ni { + color: var(--color-yellow-700); + } + /* NameException */ + .ne { + color: var(--color-red-700); + } + /* NameFunction */ + .nf { + color: var(--color-blue-400); + } + /* NameFunctionMagic */ + .fm { + color: var(--color-blue-400); + } + /* NameLabel */ + .nl { + color: var(--color-yellow-500); + } + /* NameNamespace */ + .nn { + color: var(--color-white-main); + } + /* NameOther */ + .nx { + color: var(--color-white-main); + } + /* NameProperty */ + .py { + color: var(--color-violet-300); + } + /* NameTag */ + .nt { + color: var(--color-green-300); + } + /* NameVariable */ + .nv { + color: var(--color-green-500); + } + /* NameVariableClass */ + .vc { + color: var(--color-violet-600); + } + /* NameVariableGlobal */ + .vg { + color: var(--color-violet-600); + } + /* NameVariableInstance */ + .vi { + color: var(--color-violet-600); + } + /* NameVariableMagic */ + .vm { + color: var(--color-violet-600); + } + /* Literal */ + .l { + color: var(--color-white-main); + } + /* LiteralDate */ + .ld { + color: var(--color-green-600); + } + /* LiteralString */ + .s { + color: var(--color-white-main); + } + /* LiteralStringAffix 
*/ + .sa { + color: var(--color-green-600); + } + /* LiteralStringBacktick */ + .sb { + color: var(--color-green-600); + } + /* LiteralStringChar */ + .sc { + color: var(--color-green-600); + } + /* LiteralStringDelimiter */ + .dl { + color: var(--color-green-600); + } + /* LiteralStringDoc */ + .sd { + color: var(--color-green-600); + } + /* LiteralStringDouble */ + .s2 { + color: var(--color-green-600); + } + /* LiteralStringEscape */ + .se { + color: var(--color-white-main); + } + /* LiteralStringHeredoc */ + .sh { + color: var(--color-green-600); + } + /* LiteralStringInterpol */ + .si { + color: var(--color-green-600); + } + /* LiteralStringOther */ + .sx { + color: var(--color-green-600); + } + /* LiteralStringRegex */ + .sr { + color: var(--color-blue-400); + } + /* LiteralStringSingle */ + .s1 { + color: var(--color-green-600); + } + /* LiteralStringSymbol */ + .ss { + color: var(--color-blue-400); + } + /* LiteralNumber */ + .m { + color: var(--color-blue-400); + } + /* LiteralNumberBin */ + .mb { + color: var(--color-blue-400); + } + /* LiteralNumberFloat */ + .mf { + color: var(--color-blue-400); + } + /* LiteralNumberHex */ + .mh { + color: var(--color-blue-400); + } + /* LiteralNumberInteger */ + .mi { + color: var(--color-blue-400); + } + /* LiteralNumberIntegerLong */ + .il { + color: var(--color-blue-400); + } + /* LiteralNumberOct */ + .mo { + color: var(--color-blue-400); + } + /* Operator */ + .o { + color: var(--color-blue-200); + } + /* OperatorWord */ + .ow { + color: var(--color-yellow-700); + } + /* Punctuation */ + .p { + color: var(--color-gray-500); + } + /* Comment */ + .c { + color: var(--color-gray-500); + } + /* CommentHashbang */ + .ch { + color: var(--color-gray-500); + } + /* CommentMultiline */ + .cm { + color: var(--color-gray-500); + } + /* CommentSingle */ + .c1 { + color: var(--color-gray-500); + } + /* CommentSpecial */ + .cs { + color: var(--color-gray-500); + } + /* CommentPreproc */ + .cp { + color: var(--color-gray-500); + } + /* CommentPreprocFile */ + .cpf { + color: var(--color-gray-500); + } + /* Generic */ + .g { + color: var(--color-white-main); + } + /* GenericDeleted */ + .gd { + color: var(--color-red-500); + } + /* GenericEmph */ + .ge { + color: var(--color-white-main); + } + /* GenericError */ + .gr { + color: var(--color-red-500); + } + /* GenericHeading */ + .gh { + color: var(--color-gray-600); + } + /* GenericInserted */ + .gi { + color: var(--color-green-500); + } + /* GenericOutput */ + .go { + color: var(--color-white-main); + } + /* GenericPrompt */ + .gp { + user-select: none; + color: var(--color-green-500); + } + /* GenericStrong */ + .gs { + color: var(--color-white-main); + } + /* GenericSubheading */ + .gu { + color: var(--color-gray-600); + } + /* GenericTraceback */ + .gt { + color: var(--color-red-500); + } + /* GenericUnderline */ + .gl { + color: var(--color-white-main); + text-decoration: underline; + } + /* TextWhitespace */ + .w { + color: var(--color-gray-100); } } diff --git a/assets/css/syntax-light.css b/assets/css/syntax-light.css index ba0bb789f85..e9c3151d14f 100644 --- a/assets/css/syntax-light.css +++ b/assets/css/syntax-light.css @@ -1,343 +1,337 @@ -@layer utilities { - .syntax-light { - /* Other */ - .x { - color: theme("colors.black"); - } - /* Error */ - .err { - color: theme("colors.red.light.500"); - } - /* CodeLine */ - .cl { - } - /* LineHighlight */ - .hl { - min-width: fit-content; - background-color: theme("colors.blue.light.100"); - } - .lntd:first-child .hl, - & > .chroma > code > .hl { - 
margin-left: -4px; - border-left: 4px solid theme("colors.blue.light.300"); - } - /* LineNumbersTable */ - .lnt { - white-space: pre; - user-select: none; - margin-right: 0.4em; - padding: 0 0.4em 0 0.4em; - color: theme("colors.gray.light.400"); - } - /* LineNumbers */ - .ln { - white-space: pre; - user-select: none; - margin-right: 0.4em; - padding: 0 0.4em 0 0.4em; - color: theme("colors.gray.light.400"); - } - /* Line */ - .line { - display: flex; - } - /* Keyword */ - .k { - color: theme("colors.amber.light.500"); - } - /* KeywordConstant */ - .kc { - color: theme("colors.violet.light.400"); - } - /* KeywordDeclaration */ - .kd { - color: theme("colors.amber.light.500"); - } - /* KeywordNamespace */ - .kn { - color: theme("colors.amber.light.500"); - } - /* KeywordPseudo */ - .kp { - color: theme("colors.amber.light.500"); - } - /* KeywordReserved */ - .kr { - color: theme("colors.amber.light.500"); - } - /* KeywordType */ - .kt { - color: theme("colors.amber.light.500"); - } - /* Name */ - .n { - color: theme("colors.violet.light.400"); - } - /* NameAttribute */ - .na { - color: theme("colors.amber.light.500"); - } - /* NameBuiltin */ - .nb { - color: theme("colors.amber.light.500"); - } - /* NameBuiltinPseudo */ - .bp { - color: theme("colors.violet.light.400"); - } - /* NameClass */ - .nc { - color: theme("colors.black"); - } - /* NameConstant */ - .no { - color: theme("colors.black"); - } - /* NameDecorator */ - .nd { - color: theme("colors.violet.light.400"); - } - /* NameEntity */ - .ni { - color: theme("colors.amber.light.500"); - } - /* NameException */ - .ne { - color: theme("colors.red.light.700"); - } - /* NameFunction */ - .nf { - color: theme("colors.blue.light.600"); - } - /* NameFunctionMagic */ - .fm { - color: theme("colors.blue.light.600"); - } - /* NameLabel */ - .nl { - color: theme("colors.amber.light.700"); - } - /* NameNamespace */ - .nn { - color: theme("colors.black"); - } - /* NameOther */ - .nx { - color: theme("colors.black"); - } - /* NameProperty */ - .py { - color: theme("colors.black"); - } - /* NameTag */ - .nt { - color: theme("colors.green.light.600"); - } - /* NameVariable */ - .nv { - color: theme("colors.black"); - } - /* NameVariableClass */ - .vc { - color: theme("colors.violet.light.600"); - } - /* NameVariableGlobal */ - .vg { - color: theme("colors.violet.light.600"); - } - /* NameVariableInstance */ - .vi { - color: theme("colors.violet.light.600"); - } - /* NameVariableMagic */ - .vm { - color: theme("colors.violet.light.600"); - } - /* Literal */ - .l { - color: theme("colors.black"); - } - /* LiteralDate */ - .ld { - color: theme("colors.black"); - } - /* LiteralString */ - .s { - color: theme("colors.black"); - } - /* LiteralStringAffix */ - .sa { - color: theme("colors.green.light.600"); - } - /* LiteralStringBacktick */ - .sb { - color: theme("colors.green.light.600"); - } - /* LiteralStringChar */ - .sc { - color: theme("colors.green.light.600"); - } - /* LiteralStringDelimiter */ - .dl { - color: theme("colors.green.light.600"); - } - /* LiteralStringDoc */ - .sd { - color: #8f5902; - } - /* LiteralStringDouble */ - .s2 { - color: theme("colors.green.light.600"); - } - /* LiteralStringEscape */ - .se { - color: theme("colors.black"); - } - /* LiteralStringHeredoc */ - .sh { - color: theme("colors.green.light.600"); - } - /* LiteralStringInterpol */ - .si { - color: theme("colors.green.light.600"); - } - /* LiteralStringOther */ - .sx { - color: theme("colors.green.light.600"); - } - /* LiteralStringRegex */ - .sr { - color: 
theme("colors.blue.light.500"); - } - /* LiteralStringSingle */ - .s1 { - color: theme("colors.green.light.600"); - } - /* LiteralStringSymbol */ - .ss { - color: theme("colors.green.light.600"); - } - /* LiteralNumber */ - .m { - color: theme("colors.blue.light.600"); - } - /* LiteralNumberBin */ - .mb { - color: theme("colors.blue.light.600"); - } - /* LiteralNumberFloat */ - .mf { - color: theme("colors.blue.light.600"); - } - /* LiteralNumberHex */ - .mh { - color: theme("colors.blue.light.600"); - } - /* LiteralNumberInteger */ - .mi { - color: theme("colors.blue.light.600"); - } - /* LiteralNumberIntegerLong */ - .il { - color: theme("colors.blue.light.600"); - } - /* LiteralNumberOct */ - .mo { - color: theme("colors.blue.light.600"); - } - /* Operator */ - .o { - color: theme("colors.blue.light.400"); - } - /* OperatorWord */ - .ow { - color: theme("colors.amber.light.500"); - } - /* Punctuation */ - .p { - color: theme("colors.gray.light.400"); - } - /* Comment */ - .c { - color: theme("colors.gray.light.400"); - } - /* CommentHashbang */ - .ch { - color: theme("colors.gray.light.400"); - } - /* CommentMultiline */ - .cm { - color: theme("colors.gray.light.400"); - } - /* CommentSingle */ - .c1 { - color: theme("colors.gray.light.400"); - } - /* CommentSpecial */ - .cs { - color: theme("colors.gray.light.400"); - } - /* CommentPreproc */ - .cp { - color: theme("colors.gray.light.400"); - } - /* CommentPreprocFile */ - .cpf { - color: theme("colors.gray.light.400"); - } - /* Generic */ - .g { - color: theme("colors.black"); - } - /* GenericDeleted */ - .gd { - color: theme("colors.red.light.500"); - } - /* GenericEmph */ - .ge { - color: theme("colors.black"); - } - /* GenericError */ - .gr { - color: theme("colors.red.light.500"); - } - /* GenericHeading */ - .gh { - color: theme("colors.gray.light.600"); - } - /* GenericInserted */ - .gi { - color: theme("colors.green.light.500"); - } - /* GenericOutput */ - .go { - color: theme("colors.black"); - } - /* GenericPrompt */ - .gp { - user-select: none; - color: theme("colors.green.light.400"); - } - /* GenericStrong */ - .gs { - color: theme("colors.black"); - } - /* GenericSubheading */ - .gu { - color: theme("colors.gray.light.600"); - } - /* GenericTraceback */ - .gt { - color: theme("colors.red.light.500"); - } - /* GenericUnderline */ - .gl { - color: theme("colors.black"); - text-decoration: underline; - } - /* TextWhitespace */ - .w { - color: theme("colors.gray.light.100"); - } +@utility syntax-light { + /* Other */ + .x { + color: var(--color-black-main); + } + /* Error */ + .err { + color: var(--color-red-500); + } + /* CodeLine */ + .cl { + color: var(--color-gray-700); + } + /* LineHighlight */ + .hl { + min-width: fit-content; + background-color: var(--color-gray-100); + } + /* LineNumbersTable */ + .lnt { + white-space: pre; + user-select: none; + margin-right: 0.4em; + padding: 0 0.4em 0 0.4em; + color: var(--color-gray-400); + } + /* LineNumbers */ + .ln { + white-space: pre; + user-select: none; + margin-right: 0.4em; + padding: 0 0.4em 0 0.4em; + color: var(--color-gray-400); + } + /* Line */ + .line { + display: flex; + } + /* Keyword */ + .k { + color: var(--color-yellow-700); + } + /* KeywordConstant */ + .kc { + color: var(--color-violet-400); + } + /* KeywordDeclaration */ + .kd { + color: var(--color-yellow-700); + } + /* KeywordNamespace */ + .kn { + color: var(--color-yellow-700); + } + /* KeywordPseudo */ + .kp { + color: var(--color-yellow-700); + } + /* KeywordReserved */ + .kr { + color: 
var(--color-yellow-700); + } + /* KeywordType */ + .kt { + color: var(--color-yellow-700); + } + /* Name */ + .n { + color: var(--color-violet-400); + } + /* NameAttribute */ + .na { + color: var(--color-yellow-700); + } + /* NameBuiltin */ + .nb { + color: var(--color-yellow-800); + } + /* NameBuiltinPseudo */ + .bp { + color: var(--color-violet-400); + } + /* NameClass */ + .nc { + color: var(--color-black-main); + } + /* NameConstant */ + .no { + color: var(--color-black-main); + } + /* NameDecorator */ + .nd { + color: var(--color-violet-400); + } + /* NameEntity */ + .ni { + color: var(--color-yellow-700); + } + /* NameException */ + .ne { + color: var(--color-red-700); + } + /* NameFunction */ + .nf { + color: var(--color-blue-500); + } + /* NameFunctionMagic */ + .fm { + color: var(--color-blue-500); + } + /* NameLabel */ + .nl { + color: var(--color-yellow-700); + } + /* NameNamespace */ + .nn { + color: var(--color-black-main); + } + /* NameOther */ + .nx { + color: var(--color-black-main); + } + /* NameProperty */ + .py { + color: var(--color-black-main); + } + /* NameTag */ + .nt { + color: var(--color-blue-400); + } + /* NameVariable */ + .nv { + color: var(--color-black-main); + } + /* NameVariableClass */ + .vc { + color: var(--color-violet-600); + } + /* NameVariableGlobal */ + .vg { + color: var(--color-violet-600); + } + /* NameVariableInstance */ + .vi { + color: var(--color-violet-600); + } + /* NameVariableMagic */ + .vm { + color: var(--color-violet-600); + } + /* Literal */ + .l { + color: var(--color-black-main); + } + /* LiteralDate */ + .ld { + color: var(--color-black-main); + } + /* LiteralString */ + .s { + color: var(--color-black-main); + } + /* LiteralStringAffix */ + .sa { + color: var(--color-green-700); + } + /* LiteralStringBacktick */ + .sb { + color: var(--color-green-700); + } + /* LiteralStringChar */ + .sc { + color: var(--color-green-700); + } + /* LiteralStringDelimiter */ + .dl { + color: var(--color-green-700); + } + /* LiteralStringDoc */ + .sd { + color: #8f5902; + } + /* LiteralStringDouble */ + .s2 { + color: var(--color-green-700); + } + /* LiteralStringEscape */ + .se { + color: var(--color-black-main); + } + /* LiteralStringHeredoc */ + .sh { + color: var(--color-green-700); + } + /* LiteralStringInterpol */ + .si { + color: var(--color-green-700); + } + /* LiteralStringOther */ + .sx { + color: var(--color-green-700); + } + /* LiteralStringRegex */ + .sr { + color: var(--color-blue-500); + } + /* LiteralStringSingle */ + .s1 { + color: var(--color-green-700); + } + /* LiteralStringSymbol */ + .ss { + color: var(--color-green-700); + } + /* LiteralNumber */ + .m { + color: var(--color-blue-500); + } + /* LiteralNumberBin */ + .mb { + color: var(--color-blue-500); + } + /* LiteralNumberFloat */ + .mf { + color: var(--color-blue-500); + } + /* LiteralNumberHex */ + .mh { + color: var(--color-blue-500); + } + /* LiteralNumberInteger */ + .mi { + color: var(--color-blue-500); + } + /* LiteralNumberIntegerLong */ + .il { + color: var(--color-blue-500); + } + /* LiteralNumberOct */ + .mo { + color: var(--color-blue-500); + } + /* Operator */ + .o { + color: var(--color-blue-400); + } + /* OperatorWord */ + .ow { + color: var(--color-yellow-700); + } + /* Punctuation */ + .p { + color: var(--color-gray-400); + } + /* Comment */ + .c { + color: var(--color-gray-400); + } + /* CommentHashbang */ + .ch { + color: var(--color-gray-400); + } + /* CommentMultiline */ + .cm { + color: var(--color-gray-400); + } + /* CommentSingle */ + .c1 { + color: 
var(--color-gray-400); + } + /* CommentSpecial */ + .cs { + color: var(--color-gray-400); + } + /* CommentPreproc */ + .cp { + color: var(--color-gray-400); + } + /* CommentPreprocFile */ + .cpf { + color: var(--color-gray-400); + } + /* Generic */ + .g { + color: var(--color-black-main); + } + /* GenericDeleted */ + .gd { + color: var(--color-red-500); + } + /* GenericEmph */ + .ge { + color: var(--color-black-main); + } + /* GenericError */ + .gr { + color: var(--color-red-500); + } + /* GenericHeading */ + .gh { + color: var(--color-gray-600); + } + /* GenericInserted */ + .gi { + color: var(--color-green-500); + } + /* GenericOutput */ + .go { + color: var(--color-black-main); + } + /* GenericPrompt */ + .gp { + user-select: none; + color: var(--color-green-400); + } + /* GenericStrong */ + .gs { + color: var(--color-black-main); + } + /* GenericSubheading */ + .gu { + color: var(--color-gray-600); + } + /* GenericTraceback */ + .gt { + color: var(--color-red-500); + } + /* GenericUnderline */ + .gl { + color: var(--color-black-main); + text-decoration: underline; + } + /* TextWhitespace */ + .w { + color: var(--color-gray-100); } } diff --git a/assets/css/theme.css b/assets/css/theme.css new file mode 100644 index 00000000000..4d6ecf1c5df --- /dev/null +++ b/assets/css/theme.css @@ -0,0 +1,207 @@ +@theme inline { + --font-sans: "roboto flex", sans-serif; + --font-mono: "roboto flex mono", ui-monospace, SFMono-Regular, monospace; + --default-font-family: var(--font-sans); + + --text-xs: 0.7143rem; + --text-xs--letter-spacing: 0.015em; + --text-xs--font-weight: 500; + --text-sm: 0.851rem; + --text-base: 14px; + --text-lg: 1.1429rem; + --text-lg--line-height: 1.75; + --text-xl: 1.2857rem; + --text-xl--letter-spacing: -0.015em; + --text-xl--font-weight: 500; + --text-2xl: 1.5rem; + --text-2xl--letter-spacing: -0.015em; + --text-2xl--font-weight: 500; + --text-3xl: 2rem; + --text-3xl--font-weight: 500; + --text-4xl: 2.5rem; + --text-4xl--letter-spacing: -0.015em; + --text-4xl--font-weight: 500; + + --color-background-light: #f9f9fa; + --color-background-dark: #10151b; + --color-primary-blue: var(--color-blue); + + --color-divider-light: hsla(0, 0%, 0%, 0.1); + --color-divider-dark: hsla(0, 0%, 100%, 0.05); + + --card-bg-dark: #1d262d; + --card-border-dark: #516980; + --card-bg-dark: var(--color-gray-900); + --card-border-dark: var(--color-gray-700); + + --color-navbar-bg: var(--color-background-light); + --color-navbar-bg-dark: var(--color-background-dark); + --color-navbar-text: var(--color-gray-700); + --color-navbar-text-dark: var(--tw-prose-body); + --color-navbar-border-color-light: var(--tw-prose-inverse-body); + --navbar-font-size: 0.92rem; + --navbar-group-font-title-size: 1rem; + --color-navbar-text-dark: var(--color-gray-200); + --color-navbar-group-text-dark: var(--tw-prose-body); + + --color-blue: var(--color-blue-400); + --color-blue-100: rgba(217, 229, 252, 1); + --color-blue-200: rgba(170, 196, 248, 1); + --color-blue-300: rgba(123, 164, 244, 1); + --color-blue-400: rgba(75, 131, 241, 1); + --color-blue-50: rgba(246, 248, 254, 1); + --color-blue-500: rgba(37, 96, 255, 1); + --color-blue-600: rgba(13, 77, 242, 1); + --color-blue-700: rgba(0, 61, 181, 1); + --color-blue-800: rgba(0, 41, 120, 1); + --color-blue-900: rgba(0, 29, 86, 1); + --color-blue-950: rgba(0, 21, 60, 1); + --color-blue-focus: rgba(37, 96, 255, 0.24); + --color-blue-focusvisible: rgba(37, 96, 255, 0.32); + --color-blue-hover: rgba(37, 96, 255, 0.12); + --color-blue-outlinedborder: rgba(37, 96, 255, 0.56); + 
--color-blue-selected: rgba(37, 96, 255, 0.16); + + --color-gray: var(--color-gray-600); + --color-gray-100: rgba(231, 234, 239, 1); + --color-gray-200: rgba(200, 207, 218, 1); + --color-gray-300: rgba(169, 180, 198, 1); + --color-gray-400: rgba(139, 153, 178, 1); + --color-gray-50: rgba(249, 250, 251, 1); + --color-gray-500: rgba(108, 126, 157, 1); + --color-gray-600: rgba(86, 101, 129, 1); + --color-gray-700: rgba(67, 76, 95, 1); + --color-gray-800: rgba(44, 51, 63, 1); + --color-gray-900: rgba(30, 33, 41, 1); + --color-gray-950: rgb(18, 21, 31); + --color-gray-focus: rgba(108, 126, 157, 0.24); + --color-gray-focusvisible: rgba(108, 126, 157, 0.32); + --color-gray-hover: rgba(108, 126, 157, 0.12); + --color-gray-outlinedborder: rgba(108, 126, 157, 0.56); + --color-gray-selected: rgba(108, 126, 157, 0.16); + + --color-green-100: rgba(235, 249, 238, 1); + --color-green-200: rgba(208, 241, 215, 1); + --color-green-300: rgba(169, 229, 189, 1); + --color-green-400: rgba(129, 217, 162, 1); + --color-green-50: rgba(245, 252, 247, 1); + --color-green-500: rgba(90, 206, 140, 1); + --color-green-600: rgba(56, 189, 125, 1); + --color-green-700: rgba(45, 149, 104, 1); + --color-green-800: rgba(33, 110, 75, 1); + --color-green-900: rgba(23, 75, 50, 1); + --color-green-950: rgba(17, 55, 26, 1); + --color-green-focus: rgba(56, 189, 125, 0.24); + --color-green-focusvisible: rgba(56, 189, 125, 0.32); + --color-green-hover: rgba(56, 189, 125, 0.12); + --color-green-outlinedborder: rgba(56, 189, 125, 0.56); + --color-green-selected: rgba(56, 189, 125, 0.16); + + --color-orange-100: rgba(255, 233, 217, 1); + --color-orange-200: rgba(255, 216, 187, 1); + --color-orange-300: rgba(255, 196, 153, 1); + --color-orange-400: rgba(255, 169, 107, 1); + --color-orange-50: rgba(255, 249, 245, 1); + --color-orange-500: rgba(255, 135, 49, 1); + --color-orange-600: rgba(255, 107, 0, 1); + --color-orange-700: rgba(218, 92, 0, 1); + --color-orange-800: rgba(173, 72, 0, 1); + --color-orange-900: rgba(137, 58, 1, 1); + --color-orange-950: rgba(94, 40, 0, 1); + --color-orange-focus: rgba(255, 107, 0, 0.24); + --color-orange-focusvisible: rgba(255, 107, 0, 0.32); + --color-orange-hover: rgba(255, 107, 0, 0.12); + --color-orange-outlinedborder: rgba(255, 107, 0, 0.56); + --color-orange-selected: rgba(255, 107, 0, 0.16); + + --color-pink-100: rgba(255, 230, 251, 1); + --color-pink-200: rgba(255, 201, 246, 1); + --color-pink-300: rgba(255, 166, 240, 1); + --color-pink-400: rgba(252, 113, 220, 1); + --color-pink-50: rgba(255, 247, 254, 1); + --color-pink-500: rgba(237, 73, 199, 1); + --color-pink-600: rgba(201, 24, 171, 1); + --color-pink-700: rgba(171, 0, 137, 1); + --color-pink-800: rgba(131, 0, 105, 1); + --color-pink-900: rgba(109, 0, 81, 1); + --color-pink-950: rgba(85, 0, 51, 1); + --color-pink-focus: rgba(201, 24, 171, 0.24); + --color-pink-focusvisible: rgba(201, 24, 171, 0.32); + --color-pink-hover: rgba(201, 24, 171, 0.12); + --color-pink-outlinedborder: rgba(201, 24, 171, 0.56); + --color-pink-selected: rgba(201, 24, 171, 0.16); + + --color-red-100: rgba(255, 223, 223, 1); + --color-red-200: rgba(255, 194, 194, 1); + --color-red-300: rgba(255, 168, 168, 1); + --color-red-400: rgba(255, 117, 117, 1); + --color-red-50: rgba(255, 245, 245, 1); + --color-red-500: rgba(255, 87, 87, 1); + --color-red-600: rgba(244, 47, 57, 1); + --color-red-700: rgba(228, 12, 44, 1); + --color-red-800: rgba(179, 9, 9, 1); + --color-red-900: rgba(137, 0, 0, 1); + --color-red-950: rgba(110, 0, 0, 1); + --color-red-focus: rgba(244, 47, 57, 
0.24); + --color-red-focusvisible: rgba(244, 47, 57, 0.32); + --color-red-hover: rgba(244, 47, 57, 0.12); + --color-red-outlinedborder: rgba(244, 47, 57, 0.56); + --color-red-selected: rgba(244, 47, 57, 0.16); + + --color-teal-100: rgba(223, 246, 246, 1); + --color-teal-200: rgba(195, 240, 241, 1); + --color-teal-300: rgba(160, 229, 232, 1); + --color-teal-400: rgba(106, 220, 222, 1); + --color-teal-50: rgba(243, 252, 252, 1); + --color-teal-500: rgba(47, 208, 210, 1); + --color-teal-600: rgba(27, 189, 191, 1); + --color-teal-700: rgba(44, 158, 160, 1); + --color-teal-800: rgba(24, 116, 115, 1); + --color-teal-900: rgba(18, 85, 85, 1); + --color-teal-950: rgba(9, 61, 61, 1); + --color-teal-focus: rgba(27, 189, 191, 0.24); + --color-teal-focusvisible: rgba(27, 189, 191, 0.32); + --color-teal-hover: rgba(27, 189, 191, 0.12); + --color-teal-outlinedborder: rgba(27, 189, 191, 0.56); + --color-teal-selected: rgba(27, 189, 191, 0.16); + + --color-violet: var(--color-violet-500); + --color-violet-100: rgba(239, 224, 255, 1); + --color-violet-200: rgba(211, 183, 255, 1); + --color-violet-300: rgba(174, 130, 255, 1); + --color-violet-400: rgba(152, 96, 255, 1); + --color-violet-50: rgba(252, 249, 255, 1); + --color-violet-500: rgba(125, 46, 255, 1); + --color-violet-600: rgba(109, 0, 235, 1); + --color-violet-700: rgba(87, 0, 187, 1); + --color-violet-800: rgba(69, 0, 147, 1); + --color-violet-900: rgba(55, 0, 118, 1); + --color-violet-950: rgba(37, 0, 80, 1); + --color-violet-focus: rgba(125, 46, 255, 0.24); + --color-violet-focusvisible: rgba(125, 46, 255, 0.32); + --color-violet-hover: rgba(125, 46, 255, 0.12); + --color-violet-outlinedborder: rgba(125, 46, 255, 0.56); + --color-violet-selected: rgba(125, 46, 255, 0.16); + + --color-white-main: rgba(255, 255, 255, 1); + --color-yellow-100: rgba(255, 245, 219, 1); + --color-yellow-200: rgba(255, 241, 204, 1); + --color-yellow-300: rgba(255, 232, 173, 1); + --color-yellow-400: rgba(255, 218, 122, 1); + --color-yellow-50: rgba(255, 251, 240, 1); + --color-yellow-500: rgba(255, 204, 72, 1); + --color-yellow-600: rgba(248, 182, 15, 1); + --color-yellow-700: rgba(235, 156, 0, 1); + --color-yellow-800: rgba(184, 110, 0, 1); + --color-yellow-900: rgba(133, 73, 0, 1); + --color-yellow-950: rgba(100, 55, 0, 1); + --color-yellow-focus: rgba(235, 156, 0, 0.24); + --color-yellow-focusvisible: rgba(235, 156, 0, 0.32); + --color-yellow-hover: rgba(235, 156, 0, 0.12); + --color-yellow-outlinedborder: rgba(235, 156, 0, 0.56); + --color-yellow-selected: rgba(235, 156, 0, 0.16); + + --topnav-button-bg: #4878f3; + --tw-prose-code-bg: var(--color-gray-100); + --tw-prose-code-bg-dark: var(--color-gray-800); +} diff --git a/assets/css/toc.css b/assets/css/toc.css deleted file mode 100644 index 91ff92d7cd9..00000000000 --- a/assets/css/toc.css +++ /dev/null @@ -1,14 +0,0 @@ -@layer components { - #TableOfContents { - .toc a { - @apply block max-w-full truncate py-1 pl-2 hover:font-medium hover:no-underline; - &[aria-current="true"], - &:hover { - @apply border-l-2 border-l-gray-light bg-gradient-to-r from-gray-light-100 font-medium text-black dark:border-l-gray-dark dark:from-gray-dark-200 dark:text-white; - } - &:not([aria-current="true"]) { - @apply text-gray-light-600 hover:text-black dark:text-gray-dark-700 dark:hover:text-white; - } - } - } -} diff --git a/assets/css/typography.css b/assets/css/typography.css deleted file mode 100644 index 008e7af7049..00000000000 --- a/assets/css/typography.css +++ /dev/null @@ -1,77 +0,0 @@ -@layer base { - - /* - * Font faces 
for Roboto Flex and Roboto Mono. - * - * - https://fonts.google.com/specimen/Roboto+Flex - * - https://fonts.google.com/specimen/Roboto+Mono - * - * The TTF fonts have been compressed to woff2, - * preserving the latin character subset. - * - * */ - - /* Roboto Flex */ - @font-face { - font-family: 'Roboto Flex'; - src: url('/assets/fonts/RobotoFlex.woff2') format('woff2'); - font-weight: 100 1000; /* Range of weights Roboto Flex supports */ - font-stretch: 100%; /* Range of width Roboto Flex supports */ - font-style: oblique 0deg 10deg; /* Range of oblique angle Roboto Flex supports */ - font-display: fallback; - } - - /* Roboto Mono */ - @font-face { - font-family: 'Roboto Mono'; - src: url('/assets/fonts/RobotoMono-Regular.woff2') format('woff2'); - font-weight: 100 700; /* Define the range of weight the variable font supports */ - font-style: normal; - font-display: fallback; - } - - /* Roboto Mono Italic */ - @font-face { - font-family: 'Roboto Mono'; - src: url('/assets/fonts/RobotoMono-Italic.woff2') format('woff2'); - font-weight: 100 700; /* Define the range of weight the variable font supports */ - font-style: italic; - font-display: fallback; - } - - .prose { - li { - @apply my-2; - > :last-child, - > :first-child { - margin: 0; - } - } - a { - font-weight: 400; - } - hr { - @apply mb-4 mt-8; - } - h1 { - @apply my-4 text-4xl; - line-height: 1.167; - } - h2 { - @apply mb-4 mt-8 text-3xl; - line-height: 1.2; - } - h3 { - @apply text-2xl; - line-height: 1.167; - } - h4 { - @apply text-xl; - line-height: 1.235; - } - h5 { - @apply text-lg; - line-height: 1.75; - } - } -} diff --git a/assets/css/utilities.css b/assets/css/utilities.css new file mode 100644 index 00000000000..1fec08720ae --- /dev/null +++ b/assets/css/utilities.css @@ -0,0 +1,339 @@ +@utility icon-xs { + svg { + font-size: 12px; + } +} + +@utility icon-sm { + svg { + font-size: 16px; + } +} + +@utility icon-md { + svg { + font-size: 24px; + } +} + +@utility icon-lg { + svg { + font-size: 32px; + } +} + +@utility text-primary-blue { + color: var(--color-primary-blue); +} + +@utility link { + @apply text-blue no-underline dark:text-blue-400; + font-weight: inherit; + &:hover { + @apply underline underline-offset-3; + } +} + +@utility invertible { + @apply dark:hue-rotate-180 dark:invert dark:filter; +} + +@utility bg-background-toc { + background-color: var(--color-navbar-bg); + .dark & { + background-color: var(--color-navbar-bg-dark); + } +} + +@utility icon-svg { + svg { + font-size: 24px; + width: 1em; + height: 1em; + display: inline-block; + fill: currentColor; + } +} +@utility icon-svg-stroke { + svg { + font-size: 24px; + width: 1em; + height: 1em; + display: inline-block; + stroke: currentColor; + } +} + +@utility icon-xs { + svg { + font-size: 12px; + } +} + +@utility icon-sm { + svg { + font-size: 16px; + } +} + +@utility icon-lg { + svg { + font-size: 32px; + } +} + +@utility navbar-font { + font-size: var(--navbar-font-size); + color: var(--color-navbar-text); + .dark & { + color: var(--color-navbar-text-dark); + } +} + +@utility navbar-entry-margin { + @apply px-2 py-1; +} + +@utility navbar-group { + @apply mt-5; +} + +@utility navbar-entry-background-current { + @apply bg-gray-100 dark:bg-gray-900; +} +@utility navbar-group-font-title { + font-size: var(--color-navbar-group-font-title-size); + @apply pb-1.5 font-semibold uppercase; + color: var(--color-navbar-text); + .dark & { + color: var(--color-navbar-text-dark); + } +} + +@utility prose { + table:not(.lntable) code { + overflow-wrap: unset; + 
white-space: nowrap; + } + + /* code in `inline code` style */ + :where(code):not(:where([class~="not-prose"], [class~="not-prose"] *)), + a > code { + font-size: 0.875em; + font-weight: 400 !important; + border: 1px solid !important; + border-radius: 0.25rem; + border: none !important; + padding: 4px !important; + background: var(--tw-prose-code-bg) !important; + .dark & { + background: var(--tw-prose-code-bg-dark) !important; + } + &::before, + &::after { + content: none !important; + } + } + + /* code blocks with unrecognized languages*/ + pre:not(.chroma) { + @apply overflow-x-auto p-3; + background: var(--tw-prose-code-bg); + color: var(--color-gray-700); + .dark & { + background: var(--tw-prose-code-bg-dark); + color: var(--color-gray-200); + } + } + + .highlight { + @apply my-0 overflow-x-auto p-2; + + /* LineTableTD */ + .lntd { + vertical-align: top; + padding: 0; + margin: 0; + font-weight: 400; + padding: 0 4px; + &:first-child { + width: 0; + } + } + + /* LineTableTD */ + .lntd { + vertical-align: top; + padding: 0; + margin: 0; + border: 0; + } + /* LineTable */ + .lntable { + display: table; + width: 100%; + border-spacing: 0; + padding: 0; + margin: 0; + border: 0; + /* LineNumberColumnHighlight */ + .lntd:first-child .hl { + display: block; + } + } + } +} + +@utility section-card { + @apply flex h-full flex-col gap-2 rounded-sm border p-4 drop-shadow-xs hover:drop-shadow-lg; + @apply text-gray dark:text-gray-200; + @apply border-gray-100 bg-gray-50 hover:border-gray-200 dark:border-gray-600 dark:bg-gray-900 hover:dark:border-gray-500; +} + +@utility section-card-text { + @apply leading-snug text-gray-800 dark:text-gray-200; +} +@utility section-card-title { + @apply text-xl font-semibold text-gray-900 dark:text-gray-100; +} + +@utility sub-button { + @apply flex w-full items-center gap-2 rounded-sm px-2 py-2 text-left text-gray-600 transition-colors hover:bg-gray-50 dark:text-gray-100 dark:hover:bg-gray-800; +} + +@utility dropdown-base { + @apply rounded-sm border border-gray-300 bg-white text-gray-600 dark:border-gray-300 dark:bg-gray-900 dark:text-gray-100; +} + +@utility toc { + a { + @apply block max-w-full truncate py-1 pl-2 hover:font-medium hover:no-underline; + &[aria-current="true"], + &:hover { + @apply border-l-2 border-x-gray-200 bg-gradient-to-r from-gray-50 font-medium text-black dark:border-l-gray-300 dark:from-gray-900 dark:text-white; + } + &:not([aria-current="true"]) { + @apply text-gray-600 hover:text-black dark:text-gray-100 dark:hover:text-white; + } + } +} +@utility chip { + @apply border-divider-light dark:border-divider-dark inline-flex items-center gap-1 rounded-full border bg-gray-100 px-2 text-sm text-gray-800 select-none dark:bg-gray-700 dark:text-gray-200; +} + +@utility pagination-link { + @apply flex items-center justify-center rounded-sm p-2; +} + +@utility breadcrumbs { + font-size: 90%; +} + +@utility topbar-button { + @apply min-h-10 max-w-40 rounded-md border-1 border-blue-300 bg-(--topnav-button-bg) px-2 text-center font-semibold text-white; + @apply inline-flex items-center justify-center gap-1.5 transition-colors hover:border-blue-300 hover:bg-blue-400; + svg { + font-size: 19px; + } +} +@utility topbar-button-clear { + @apply min-h-9 px-0 text-center font-semibold text-white/95 transition-colors hover:text-white/85; + svg { + font-size: 19px; + } +} + +.footer { + @apply ml-auto hidden flex-row justify-between gap-6 px-4 pt-6 pb-2 md:flex; + @apply border-t border-gray-200 bg-gray-100 dark:border-gray-700 dark:bg-gray-900; + @apply 
text-gray-600 dark:text-gray-400; + a:hover { + @apply underline underline-offset-4; + } +} + +.social { + @apply flex min-w-20 flex-wrap items-center gap-1; +} + +.links { + @apply flex items-center gap-3; +} + +.links a { + @apply inline-flex min-w-15 truncate whitespace-normal; +} + +.secondaryLinks { + @apply flex items-center; + a, + button { + @apply whitespace-normal md:truncate; + } +} + +.secondaryLinks > *:not(:last-child)::after { + content: "|"; + @apply mx-1 text-gray-400; +} + +.ot-sdk-show-settings { + @apply !text-gray-600 hover:!text-gray-800 dark:!text-gray-400 dark:hover:!text-gray-200; + @apply !m-0 !min-w-15 !truncate !border-none !p-0 !text-sm; +} +#ot-sdk-btn.ot-sdk-show-settings:hover, +#ot-sdk-btn.optanon-show-settings:hover { + @apply !text-gray-600 underline decoration-1 underline-offset-4 hover:!bg-transparent dark:!text-gray-400; +} + +@keyframes reflection { + 0% { + transform: translateX(-100%); + } + 18% { + transform: translateX(100%); + } + 100% { + transform: translateX(100%); + } +} + +@utility shimmer { + position: relative; + overflow: hidden; + + & > * { + position: relative; + z-index: 2; + } + + &::after { + content: ''; + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + background: linear-gradient( + 110deg, + transparent 0%, + transparent 43%, + rgba(123, 164, 244, 0.25) 49%, + rgba(170, 196, 248, 0.45) 50%, + rgba(123, 164, 244, 0.25) 51%, + transparent 57%, + transparent 100% + ); + transform: translateX(-100%); + pointer-events: none; + z-index: 1; + + @media (prefers-reduced-motion: no-preference) { + animation: reflection 3s ease-in-out 3s forwards; + } + } + + &:hover { + @apply bg-blue-800 border-blue-400; + } +} diff --git a/assets/favicons/docs.ico b/assets/favicons/docs.ico deleted file mode 100644 index 7925783d50c..00000000000 Binary files a/assets/favicons/docs.ico and /dev/null differ diff --git a/assets/favicons/docs@2x.ico b/assets/favicons/docs@2x.ico deleted file mode 100644 index 523925f7bd7..00000000000 Binary files a/assets/favicons/docs@2x.ico and /dev/null differ diff --git a/assets/icons/AppleMac.svg b/assets/icons/AppleMac.svg new file mode 100644 index 00000000000..b218d8cdcaf --- /dev/null +++ b/assets/icons/AppleMac.svg @@ -0,0 +1,8 @@ +<svg width="24" height="24" viewBox="0 0 24 24" fill="blue" xmlns="http://www.w3.org/2000/svg"> +<mask id="mask0_26_122" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="0" y="0" width="24" height="24"> +<path d="M17.9549 21.7542C16.8041 22.871 15.5344 22.6969 14.3241 22.1703C13.0373 21.6331 11.861 21.5991 10.5021 22.1703C8.80981 22.9007 7.91165 22.6884 6.89246 21.7542C1.13826 15.8301 1.98759 6.80604 8.52741 6.46631C10.1135 6.55125 11.224 7.34324 12.1583 7.40906C13.5469 7.12666 14.8761 6.31768 16.3625 6.42385C18.1482 6.56823 19.4837 7.27317 20.3755 8.54079C16.7022 10.749 17.5728 15.5902 20.9467 16.9491C20.2715 18.7221 19.4052 20.4738 17.9528 21.769L17.9549 21.7542ZM12.0309 6.40261C11.8589 3.76971 13.9928 1.60393 16.4474 1.3916C16.785 4.42794 13.6871 6.69988 12.0309 6.40261Z" fill="currentColor"/> +</mask> +<g mask="url(#mask0_26_122)"> +<rect width="24" height="24" fill="currentColor" fill-opacity="0.9"/> +</g> +</svg> diff --git a/static/assets/icons/Compose.svg b/assets/icons/Compose.svg similarity index 100% rename from static/assets/icons/Compose.svg rename to assets/icons/Compose.svg diff --git a/assets/icons/Linux.svg b/assets/icons/Linux.svg new file mode 100644 index 00000000000..55554f63b63 --- /dev/null +++ b/assets/icons/Linux.svg @@ -0,0 +1,8 @@ 
+<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg"> +<mask id="mask0_26_113" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="3" y="1" width="18" height="22"> +<path d="M19.0689 16.3388C19.2698 16.4219 19.443 16.5636 19.5674 16.7466C19.6791 16.9182 19.7575 17.1255 19.8067 17.3685C19.8333 17.5113 19.8652 17.6431 19.8998 17.7667C19.9649 17.9982 20.0631 18.2185 20.1909 18.4202C20.2534 18.5204 20.3372 18.6303 20.4409 18.7525C20.53 18.8527 20.6097 18.9817 20.6802 19.1383C20.7451 19.2791 20.7801 19.4326 20.7826 19.5886C20.7877 19.7385 20.7442 19.8859 20.6589 20.0074C20.5722 20.1253 20.4665 20.227 20.3465 20.308C20.187 20.422 20.0208 20.5222 19.8479 20.6087C19.6751 20.6938 19.5023 20.7831 19.3295 20.8764C19.071 21.0036 18.8241 21.1543 18.5916 21.3268C18.3842 21.4847 18.1755 21.6837 17.9681 21.9281C17.7263 22.2078 17.4464 22.4497 17.1372 22.6462C16.9793 22.7497 16.8078 22.8292 16.628 22.8823C16.4552 22.9318 16.2824 22.9647 16.1095 22.9785C15.708 22.9785 15.3757 22.9071 15.1124 22.7643C14.8492 22.6215 14.6205 22.3606 14.4264 21.9817C14.3706 21.8732 14.3161 21.8059 14.2603 21.7771C14.174 21.7394 14.0833 21.714 13.9904 21.7016L12.9388 21.6165C12.5931 21.589 12.2408 21.5739 11.8805 21.5739C11.5813 21.5743 11.2826 21.5991 10.9872 21.648C10.6827 21.6988 10.3809 21.7593 10.0831 21.8307C10.022 21.8444 9.95949 21.8993 9.897 21.9913C9.81921 22.1029 9.73201 22.2072 9.63643 22.303C9.50692 22.4303 9.35599 22.5322 9.19106 22.6036C8.95605 22.6984 8.70552 22.7455 8.45322 22.7423C8.22455 22.7423 7.97861 22.7176 7.71537 22.6682C7.46911 22.6239 7.23097 22.5406 7.00944 22.421C6.6098 22.2099 6.18325 22.0583 5.74247 21.9707C5.32635 21.8924 4.88763 21.8197 4.42365 21.7565C4.26987 21.7358 4.11722 21.707 3.96632 21.67C3.82692 21.6375 3.69562 21.5753 3.58078 21.4874C3.4754 21.4069 3.38753 21.3045 3.32287 21.1867C3.25197 21.0367 3.21601 20.8717 3.21784 20.7048C3.21784 20.4824 3.24576 20.2682 3.3016 20.0609C3.35743 19.855 3.38801 19.6353 3.39466 19.4074C3.39466 19.2495 3.38402 19.0957 3.36408 18.9461C3.34209 18.7858 3.32789 18.6245 3.32154 18.4628C3.32154 18.0989 3.40529 17.8312 3.57147 17.6582C3.73766 17.4866 3.97962 17.3507 4.29868 17.2504C4.44043 17.2061 4.57392 17.1374 4.69353 17.0472C4.80325 16.9618 4.90719 16.8686 5.00462 16.7685C5.10354 16.6659 5.19383 16.5547 5.2745 16.4363C5.35826 16.3141 5.45132 16.1919 5.55502 16.0711C5.5757 16.0436 5.58693 16.0098 5.58692 15.975C5.58692 15.8885 5.58293 15.8061 5.57629 15.7278L5.55502 15.4697C5.55502 14.9837 5.62814 14.4949 5.77305 14.0006C5.91928 13.5064 6.1094 13.0258 6.34471 12.5521C6.57966 12.0813 6.84342 11.6264 7.1344 11.1902C7.42422 10.7549 7.71271 10.3499 7.99589 9.97783C8.38052 9.48054 8.68572 8.92294 8.89991 8.32616C9.09933 7.755 9.20436 7.13167 9.21101 6.45892C9.21101 6.17472 9.19771 5.89189 9.16979 5.61317C9.1419 5.32805 9.12814 5.04164 9.12858 4.75507C9.12858 4.18254 9.19106 3.66768 9.3147 3.21048C9.43967 2.75191 9.63377 2.35924 9.897 2.02973C10.1602 1.70022 10.5032 1.45446 10.9247 1.2897C11.3488 1.12495 11.854 1.0357 12.4416 1.02197C13.1422 1.02197 13.6992 1.16476 14.1154 1.45171C14.5301 1.73729 14.8492 2.11211 15.0699 2.57754C15.2919 3.04161 15.4342 3.56059 15.4953 4.13311C15.5591 4.70427 15.5937 5.28366 15.6003 5.86992V6.05252C15.6003 6.34634 15.603 6.6072 15.611 6.83511C15.6234 7.29848 15.7255 7.75459 15.9114 8.1765C16.0085 8.39755 16.1534 8.63782 16.3475 8.89456C16.6799 9.34627 17.0202 9.80072 17.3659 10.2579C17.7115 10.7151 18.0266 11.1874 18.3098 11.6721C18.5943 12.1595 18.8269 12.6675 19.0064 13.1961C19.1872 
13.726 19.2803 14.2972 19.2869 14.9123C19.2887 15.3972 19.2151 15.8779 19.0689 16.3388ZM10.3557 5.16284C10.4514 5.16284 10.5312 5.18755 10.595 5.23698C10.6598 5.29087 10.7098 5.36142 10.7399 5.44155C10.7732 5.52401 10.7973 5.61016 10.8117 5.6983C10.825 5.78479 10.8329 5.87404 10.8329 5.9674C10.8351 6.02637 10.8242 6.08507 10.801 6.13902C10.7783 6.1846 10.7466 6.22481 10.708 6.25709L10.583 6.36419C10.5384 6.40315 10.4971 6.44586 10.4594 6.49187C10.4095 6.57203 10.3415 6.63847 10.2613 6.68546C10.1789 6.73482 10.0991 6.78843 10.022 6.8461C9.94833 6.90128 9.87898 6.96233 9.81458 7.0287C9.78239 7.06454 9.75755 7.10673 9.74155 7.15274C9.72556 7.19875 9.71874 7.24763 9.72152 7.29643C9.72152 7.36096 9.75209 7.4145 9.81458 7.45707C9.9696 7.5579 10.0926 7.7035 10.1682 7.87582C10.2373 8.04058 10.3145 8.20121 10.3969 8.35911C10.4793 8.51562 10.587 8.64743 10.7186 8.75589C10.8502 8.86298 11.055 8.91653 11.3302 8.91653H11.394C11.6705 8.9028 11.9337 8.82729 12.1837 8.69136C12.4323 8.55544 12.6822 8.41265 12.9308 8.26163C12.9741 8.23648 13.0195 8.21535 13.0664 8.19847C13.1147 8.18068 13.16 8.1552 13.2007 8.12296L13.9585 7.51199C13.9717 7.46596 13.9823 7.41921 13.9904 7.37194C13.9985 7.32175 14.0056 7.2714 14.0117 7.22092C14.014 7.15266 13.9954 7.08538 13.9585 7.0287C13.9245 6.9789 13.8827 6.93528 13.8348 6.89964C13.7817 6.86222 13.7214 6.83694 13.658 6.8255C13.5947 6.81237 13.5324 6.794 13.4719 6.77058C13.3062 6.7354 13.1478 6.67034 13.0039 6.57837C12.8611 6.48678 12.7074 6.41478 12.5466 6.36419C12.52 6.35595 12.4987 6.33535 12.4841 6.29966C12.4683 6.25738 12.4545 6.2143 12.4429 6.1706C12.4292 6.12533 12.4256 6.07747 12.4323 6.03055C12.4398 5.9946 12.4361 5.95708 12.4216 5.92346C12.4216 5.83834 12.4283 5.7491 12.4429 5.65573C12.4573 5.56122 12.4888 5.47037 12.536 5.38801C12.5765 5.31173 12.6333 5.24599 12.7021 5.19579C12.7841 5.14639 12.8771 5.11989 12.972 5.1189C13.1874 5.1189 13.3429 5.20128 13.44 5.36604C13.537 5.5308 13.5889 5.70928 13.5955 5.9015C13.5973 5.98693 13.5795 6.07159 13.5437 6.14863C13.5082 6.22192 13.49 6.30284 13.4905 6.38478C13.4905 6.44244 13.5091 6.48089 13.5437 6.50286C13.5856 6.52656 13.6317 6.54105 13.6793 6.54542C13.8388 6.54542 13.9412 6.50697 13.9904 6.42872C14.0463 6.3197 14.0747 6.19785 14.0728 6.07449C14.0728 5.90973 14.0595 5.72713 14.0329 5.52668C14.0066 5.33274 13.95 5.14449 13.8654 4.96925C13.7879 4.80468 13.6784 4.65838 13.5437 4.53951C13.3994 4.42114 13.2186 4.36067 13.0345 4.36927C12.6756 4.36927 12.415 4.46125 12.2554 4.64798C12.0959 4.83333 12.0135 5.11204 12.0068 5.48411C12.0068 5.59121 12.0135 5.6983 12.0268 5.80539C12.0401 5.91385 12.0481 6.02094 12.0481 6.12804C12.0481 6.16373 12.0441 6.18158 12.0374 6.18158C12.0159 6.17674 11.9949 6.16937 11.9749 6.15961C11.9335 6.14227 11.8923 6.12442 11.8513 6.10607C11.8005 6.08391 11.748 6.066 11.6944 6.05252C11.6061 6.03035 11.5143 6.02662 11.4245 6.04154C11.3694 6.04876 11.3139 6.05243 11.2584 6.05252C11.1613 6.05252 11.1121 6.03467 11.1121 5.99898C11.1121 5.89189 11.1068 5.75596 11.0922 5.59121C11.0789 5.42645 11.0443 5.26169 10.9885 5.09694C10.9421 4.95035 10.8719 4.81294 10.7811 4.69054C10.6973 4.58345 10.5737 4.52578 10.4075 4.51892C10.2959 4.51626 10.1876 4.55861 10.1057 4.63699C10.0204 4.71853 9.94695 4.81241 9.8877 4.91571C9.82399 5.02725 9.78495 5.15196 9.77336 5.28091C9.76221 5.39564 9.74847 5.51009 9.73215 5.62416C9.73749 5.81358 9.76882 6.00125 9.82521 6.18158C9.85978 6.29554 9.89833 6.39576 9.93955 6.48226C9.98076 6.56738 10.0299 6.60995 10.0858 6.60995C10.119 6.60995 10.1656 6.58523 10.2201 6.53581C10.2759 
6.48501 10.3038 6.4397 10.3038 6.39576C10.3038 6.3738 10.2932 6.36007 10.2719 6.3532C10.2518 6.34605 10.2307 6.34234 10.2094 6.34222C10.1616 6.34222 10.119 6.3175 10.0845 6.2667C10.0488 6.21407 10.021 6.15622 10.002 6.09508C9.98021 6.02768 9.95938 5.95994 9.93955 5.89189C9.89814 5.70664 9.92417 5.51215 10.0127 5.34544C10.0751 5.23012 10.1895 5.1697 10.3557 5.16284ZM8.09028 21.8636C8.25646 21.8636 8.41998 21.8499 8.57952 21.8197C8.73745 21.7916 8.88893 21.7334 9.02621 21.648C9.15786 21.5694 9.2656 21.4543 9.3373 21.3158C9.41294 21.1549 9.45549 20.9796 9.46227 20.8009C9.46228 20.6732 9.44115 20.5465 9.39979 20.4261C9.35755 20.3033 9.29797 20.1877 9.22297 20.0829C9.14092 19.9486 9.05078 19.8197 8.95309 19.6971C8.85536 19.574 8.76522 19.4447 8.68321 19.3099C8.55392 19.1136 8.4254 18.9169 8.29767 18.7195C8.17403 18.5273 8.04907 18.3241 7.92543 18.1085C7.81611 17.9259 7.71193 17.74 7.61301 17.5511C7.51136 17.3583 7.39299 17.1754 7.25937 17.0047C7.17499 16.8959 7.07761 16.7985 6.96955 16.715C6.85854 16.6288 6.72285 16.5834 6.58401 16.5859C6.43917 16.5837 6.29878 16.6376 6.19049 16.7369C6.06673 16.8483 5.95201 16.97 5.84749 17.1008C5.72491 17.2492 5.59301 17.389 5.45265 17.5195C5.30641 17.6555 5.12029 17.7626 4.89162 17.8408C4.73688 17.8837 4.59453 17.9647 4.47683 18.077C4.37978 18.1772 4.33059 18.331 4.33059 18.5383C4.33059 18.6811 4.34123 18.8239 4.3625 18.9666C4.38244 19.1094 4.39706 19.2536 4.40371 19.3964C4.40371 19.5886 4.37712 19.7712 4.31996 19.9428C4.26633 20.1051 4.23849 20.2752 4.23753 20.4467C4.23753 20.6403 4.304 20.7762 4.43429 20.8545C4.56723 20.9341 4.7148 20.9877 4.88098 21.0151C5.08971 21.0522 5.28647 21.0796 5.47392 21.1016C5.66004 21.1236 5.84351 21.1565 6.02431 21.1977C6.20379 21.2417 6.38326 21.287 6.56407 21.3378C6.74354 21.3872 6.93499 21.4668 7.13573 21.5739C7.17562 21.5945 7.23943 21.6206 7.32186 21.648L7.60237 21.7442C7.70607 21.7812 7.80312 21.8101 7.89352 21.8307C7.95786 21.8485 8.02378 21.8595 8.09028 21.8636ZM11.8925 20.5758C12.0866 20.5758 12.294 20.5552 12.516 20.5112C12.968 20.4238 13.4052 20.2683 13.8136 20.0499C14.0037 19.9498 14.1844 19.8317 14.3533 19.6971C14.3701 19.6775 14.3844 19.6558 14.3959 19.6325C14.4091 19.605 14.4194 19.5759 14.4264 19.546V19.5351C14.4677 19.3785 14.5022 19.2055 14.5301 19.0202C14.5589 18.8314 14.5833 18.6418 14.6033 18.4518C14.6234 18.2618 14.6477 18.0722 14.6764 17.8834C14.703 17.698 14.7282 17.5154 14.7482 17.3369C14.7761 17.1447 14.8107 16.958 14.8519 16.7795C14.8944 16.601 14.9489 16.4322 15.018 16.2743C15.0864 16.1188 15.1814 15.9774 15.2986 15.8569C15.4349 15.7212 15.5888 15.6057 15.7559 15.5136V15.4917L15.7453 15.4601C15.7476 15.3873 15.7692 15.3165 15.8077 15.2555C15.8543 15.1767 15.9063 15.1015 15.9633 15.0304C16.0186 14.9581 16.0857 14.8964 16.1614 14.8477C16.2306 14.8049 16.3036 14.769 16.3794 14.7407C16.3375 14.5681 16.2923 14.3965 16.2438 14.2258C16.1957 14.0559 16.154 13.8842 16.1188 13.7109C16.0848 13.4917 16.0436 13.2737 15.9952 13.0574C15.9567 12.8896 15.9078 12.7245 15.849 12.5631C15.7894 12.4059 15.7089 12.258 15.6097 12.1238C15.5073 11.981 15.3783 11.808 15.2268 11.6089C15.1689 11.5421 15.1164 11.4705 15.0699 11.3947C15.0338 11.3096 15.0131 11.2184 15.0087 11.1256C14.9922 11.0318 14.9713 10.9388 14.9463 10.8469C14.8529 10.4992 14.7455 10.1557 14.6245 9.8172C14.5607 9.63646 14.4842 9.46072 14.3959 9.29135C14.3252 9.15573 14.249 9.02334 14.1672 8.89456C14.0981 8.78747 14.0249 8.73393 13.9492 8.73393C13.783 8.73393 13.5849 8.80257 13.3562 8.9385C13.1289 9.07442 12.8856 9.22408 12.6304 9.38883C12.3738 9.55359 12.1252 
9.70736 11.8819 9.85015C11.6399 9.99294 11.4219 10.0602 11.2278 10.0533C11.0206 10.0532 10.8195 9.9817 10.6561 9.85015C10.4801 9.71161 10.3195 9.55319 10.1775 9.37785C10.0326 9.19936 9.91163 9.04559 9.81458 8.91653C9.71753 8.78747 9.64574 8.71608 9.59655 8.70235C9.54204 8.70235 9.51013 8.74491 9.50349 8.83003C9.49608 8.92641 9.49254 9.02305 9.49285 9.11973V9.30233C9.49285 9.34627 9.4862 9.37373 9.47158 9.38883C9.3958 9.55359 9.31205 9.71422 9.22297 9.87074C9.13257 10.0286 9.0435 10.1893 8.95309 10.354C8.77401 10.6706 8.67396 11.0283 8.66194 11.3947C8.66194 11.5018 8.66859 11.6089 8.68321 11.716C8.69651 11.8231 8.73107 11.9274 8.78691 12.0277L8.76564 12.0689C8.71227 12.1454 8.65314 12.2175 8.58882 12.2844C8.52332 12.3533 8.46454 12.4288 8.41334 12.5096C8.15904 12.8975 7.97566 13.3303 7.87225 13.7864C7.76855 14.2436 7.71271 14.7091 7.70607 15.18C7.70607 15.3022 7.71404 15.4244 7.72734 15.5452C7.74063 15.6674 7.74861 15.7882 7.74861 15.9104C7.7482 15.95 7.74465 15.9895 7.73797 16.0285C7.73129 16.0675 7.72774 16.107 7.72734 16.1466C7.87512 16.1638 8.01729 16.2149 8.14346 16.2962C8.30299 16.3896 8.47582 16.5077 8.66194 16.6505C8.84807 16.7932 9.02621 16.958 9.19239 17.1447C9.35858 17.3301 9.51678 17.5113 9.66967 17.6912C9.82255 17.8696 9.9329 18.0481 10.002 18.2266C10.0712 18.4051 10.1164 18.552 10.1363 18.666C10.1399 18.749 10.127 18.8319 10.0984 18.9095C10.0698 18.9872 10.0262 19.0581 9.97012 19.1177C9.8509 19.2378 9.70996 19.3325 9.55533 19.3964C9.67366 19.6106 9.81857 19.7932 9.99139 19.9428C10.1642 20.0939 10.3517 20.2147 10.5524 20.308C10.7532 20.4014 10.9712 20.4687 11.2065 20.5112C11.4418 20.5552 11.6718 20.5758 11.8925 20.5758ZM16.0577 22.0998C16.2026 22.0998 16.3448 22.0778 16.4831 22.0352C16.6926 21.9732 16.892 21.8793 17.0747 21.7565C17.2555 21.6343 17.4204 21.4847 17.5733 21.3048C17.9344 20.8745 18.3763 20.5242 18.8721 20.2751L19.2151 20.1144C19.3328 20.0615 19.4469 20.0005 19.5568 19.9318C19.6361 19.8799 19.7097 19.8192 19.7762 19.7506C19.8096 19.7172 19.8361 19.677 19.8539 19.6327C19.8717 19.5883 19.8806 19.5406 19.8799 19.4925C19.8799 19.4373 19.8691 19.3826 19.8479 19.3319C19.8231 19.2747 19.7919 19.2208 19.7549 19.1712C19.6601 19.035 19.5701 18.8953 19.485 18.7525C19.408 18.6225 19.3418 18.4861 19.2869 18.3447L19.1207 17.9163C19.0608 17.7591 19.0119 17.5976 18.9745 17.433C18.9601 17.3822 18.9356 17.335 18.9027 17.2944C18.8359 17.1912 18.7446 17.1074 18.6376 17.0509C18.5305 16.9944 18.4113 16.9672 18.2912 16.9717C18.1672 16.9713 18.0452 17.0044 17.9375 17.0678L17.5839 17.2724C17.4608 17.3429 17.3362 17.4107 17.2103 17.4756C17.0878 17.5392 16.9526 17.5722 16.8155 17.5717C16.6929 17.5772 16.5721 17.54 16.4725 17.466C16.3769 17.3876 16.2989 17.2888 16.2438 17.1763C16.1847 17.0618 16.1327 16.9434 16.0883 16.8221L15.9633 16.4678C15.9091 16.5597 15.8468 16.6461 15.7772 16.726C15.706 16.8078 15.6497 16.9022 15.611 17.0047C15.5206 17.2189 15.4647 17.4578 15.4448 17.7227C15.4102 18.1374 15.365 18.541 15.3092 18.9351C15.2523 19.3368 15.1588 19.7322 15.03 20.1158C14.9933 20.2385 14.9653 20.3638 14.9463 20.4906C14.9256 20.6221 14.9114 20.7545 14.9037 20.8874C14.9037 21.2375 15.0087 21.5272 15.2161 21.7565C15.4235 21.9844 15.704 22.0998 16.0577 22.0998Z" fill="currentColor"/> +</mask> +<g mask="url(#mask0_26_113)"> +<rect width="24" height="24" fill="currentColor" fill-opacity="0.9"/> +</g> +</svg> diff --git a/static/assets/icons/Scout.svg b/assets/icons/Scout.svg similarity index 100% rename from static/assets/icons/Scout.svg rename to assets/icons/Scout.svg diff --git 
a/static/assets/icons/Testcontainers.svg b/assets/icons/Testcontainers.svg similarity index 100% rename from static/assets/icons/Testcontainers.svg rename to assets/icons/Testcontainers.svg diff --git a/static/assets/icons/Whale.svg b/assets/icons/Whale.svg similarity index 100% rename from static/assets/icons/Whale.svg rename to assets/icons/Whale.svg diff --git a/assets/icons/Windows.svg b/assets/icons/Windows.svg new file mode 100644 index 00000000000..7244da36d97 --- /dev/null +++ b/assets/icons/Windows.svg @@ -0,0 +1,8 @@ +<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg"> +<mask id="mask0_26_117" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="0" y="0" width="24" height="24"> +<path d="M2.8584 11.5731H10.2447V4.31147L2.8584 5.34754V11.5731ZM2.8584 18.6499L10.2447 19.682V12.503H2.8584V18.6499ZM11.0617 4.19737V11.5731H20.89V2.82031L11.0591 4.19737H11.0617ZM11.0617 19.7961L20.89 21.1732V12.5043H11.0591V19.7961H11.0617Z" fill="currentColor"/> +</mask> +<g mask="url(#mask0_26_117)"> +<rect width="24" height="24" fill="currentColor" fill-opacity="0.9"/> +</g> +</svg> diff --git a/assets/icons/cagent.svg b/assets/icons/cagent.svg new file mode 100644 index 00000000000..669a0e7b3fb --- /dev/null +++ b/assets/icons/cagent.svg @@ -0,0 +1,14 @@ +<svg width="825" height="824" viewBox="0 0 825 824" fill="none" xmlns="http://www.w3.org/2000/svg"> +<rect x="0.883789" width="824" height="824" rx="158" fill="#080B0F"/> +<mask id="path-2-inside-1_52_117" fill="white"> +<path d="M277.566 426.88C338.755 427.655 388.119 477.498 388.119 538.871L388.11 540.318C387.335 601.507 337.492 650.871 276.119 650.871L274.672 650.862C213.965 650.094 164.897 601.026 164.128 540.318L164.119 538.871C164.119 477.015 214.263 426.871 276.119 426.871L277.566 426.88ZM551.096 426.862C612.285 427.637 661.648 477.481 661.648 538.854L661.64 540.301C660.865 601.489 611.021 650.853 549.648 650.854L548.201 650.845C487.012 650.07 437.649 600.226 437.648 538.854C437.648 476.998 487.793 426.854 549.648 426.854L551.096 426.862ZM276.119 476.871C241.877 476.871 214.119 504.629 214.119 538.871C214.119 573.113 241.877 600.871 276.119 600.871C310.361 600.871 338.119 573.113 338.119 538.871C338.119 504.629 310.361 476.871 276.119 476.871ZM549.648 476.854C515.407 476.854 487.648 504.612 487.648 538.854C487.649 573.095 515.407 600.854 549.648 600.854C583.89 600.853 611.648 573.095 611.648 538.854C611.648 504.612 583.89 476.854 549.648 476.854ZM412.569 188C474.425 188 524.569 238.144 524.569 300C524.569 361.856 474.425 412 412.569 412C350.713 412 300.569 361.856 300.569 300C300.569 238.144 350.713 188 412.569 188ZM412.569 238C378.328 238 350.569 265.758 350.569 300C350.569 334.242 378.328 362 412.569 362C446.811 362 474.569 334.241 474.569 300C474.569 265.759 446.811 238 412.569 238Z"/> +</mask> +<path d="M277.566 426.88C338.755 427.655 388.119 477.498 388.119 538.871L388.11 540.318C387.335 601.507 337.492 650.871 276.119 650.871L274.672 650.862C213.965 650.094 164.897 601.026 164.128 540.318L164.119 538.871C164.119 477.015 214.263 426.871 276.119 426.871L277.566 426.88ZM551.096 426.862C612.285 427.637 661.648 477.481 661.648 538.854L661.64 540.301C660.865 601.489 611.021 650.853 549.648 650.854L548.201 650.845C487.012 650.07 437.649 600.226 437.648 538.854C437.648 476.998 487.793 426.854 549.648 426.854L551.096 426.862ZM276.119 476.871C241.877 476.871 214.119 504.629 214.119 538.871C214.119 573.113 241.877 600.871 276.119 600.871C310.361 600.871 338.119 573.113 338.119 538.871C338.119 504.629 
310.361 476.871 276.119 476.871ZM549.648 476.854C515.407 476.854 487.648 504.612 487.648 538.854C487.649 573.095 515.407 600.854 549.648 600.854C583.89 600.853 611.648 573.095 611.648 538.854C611.648 504.612 583.89 476.854 549.648 476.854ZM412.569 188C474.425 188 524.569 238.144 524.569 300C524.569 361.856 474.425 412 412.569 412C350.713 412 300.569 361.856 300.569 300C300.569 238.144 350.713 188 412.569 188ZM412.569 238C378.328 238 350.569 265.758 350.569 300C350.569 334.242 378.328 362 412.569 362C446.811 362 474.569 334.241 474.569 300C474.569 265.759 446.811 238 412.569 238Z" fill="url(#paint0_radial_52_117)"/> +<path d="M277.566 426.88L277.883 401.882L277.801 401.881L277.718 401.88L277.566 426.88ZM388.119 538.871L413.119 539.023L413.119 538.947V538.871H388.119ZM388.11 540.318L413.108 540.635L413.109 540.553L413.11 540.47L388.11 540.318ZM276.119 650.871L275.967 675.871L276.043 675.871H276.119V650.871ZM274.672 650.862L274.355 675.86L274.438 675.861L274.52 675.862L274.672 650.862ZM164.128 540.318L139.128 540.47L139.129 540.553L139.13 540.635L164.128 540.318ZM164.119 538.871H139.119V538.947L139.12 539.023L164.119 538.871ZM276.119 426.871L276.271 401.872L276.195 401.871H276.119V426.871ZM551.096 426.862L551.412 401.864L551.33 401.863L551.248 401.863L551.096 426.862ZM661.648 538.854L686.648 539.005L686.648 538.929V538.854H661.648ZM661.64 540.301L686.638 540.617L686.639 540.535L686.639 540.453L661.64 540.301ZM549.648 650.854L549.497 675.853L549.573 675.854H549.648L549.648 650.854ZM548.201 650.845L547.885 675.843L547.967 675.844L548.049 675.844L548.201 650.845ZM437.648 538.854H412.648V538.854L437.648 538.854ZM549.648 426.854L549.8 401.854L549.724 401.854H549.648V426.854ZM276.119 476.871V451.871V476.871ZM214.119 538.871H189.119H214.119ZM276.119 600.871V625.871V600.871ZM338.119 538.871H363.119H338.119ZM549.648 476.854L549.648 451.854H549.648V476.854ZM487.648 538.854L462.648 538.853V538.854L487.648 538.854ZM549.648 600.854V625.854H549.648L549.648 600.854ZM611.648 538.854L636.648 538.854V538.853L611.648 538.854ZM412.569 188L412.569 163H412.569V188ZM524.569 300H549.569V300L524.569 300ZM412.569 412V437H412.569L412.569 412ZM300.569 300L275.569 300V300H300.569ZM412.569 238L412.57 213H412.569V238ZM350.569 300L325.569 300V300H350.569ZM412.569 362V387H412.57L412.569 362ZM474.569 300H499.569V300L474.569 300ZM277.566 426.88L277.25 451.878C324.773 452.48 363.119 491.199 363.119 538.871H388.119H413.119C413.119 463.797 352.737 402.83 277.883 401.882L277.566 426.88ZM388.119 538.871L363.12 538.719L363.111 540.167L388.11 540.318L413.11 540.47L413.119 539.023L388.119 538.871ZM388.11 540.318L363.112 540.002C362.511 587.525 323.791 625.871 276.119 625.871V650.871V675.871C351.193 675.871 412.16 615.489 413.108 540.635L388.11 540.318ZM276.119 650.871L276.271 625.872L274.824 625.863L274.672 650.862L274.52 675.862L275.967 675.871L276.119 650.871ZM274.672 650.862L274.988 625.864C227.841 625.267 189.723 587.15 189.126 540.002L164.128 540.318L139.13 540.635C140.07 614.902 200.089 674.92 274.355 675.86L274.672 650.862ZM164.128 540.318L189.127 540.167L189.119 538.719L164.119 538.871L139.12 539.023L139.128 540.47L164.128 540.318ZM164.119 538.871H189.119C189.119 490.822 228.07 451.871 276.119 451.871V426.871V401.871C200.456 401.871 139.119 463.208 139.119 538.871H164.119ZM276.119 426.871L275.967 451.871L277.415 451.879L277.566 426.88L277.718 401.88L276.271 401.872L276.119 426.871ZM551.096 426.862L550.779 451.86C598.303 452.462 636.648 491.182 636.648 538.854H661.648H686.648C686.648 463.78 626.266 402.812 551.412 
401.864L551.096 426.862ZM661.648 538.854L636.649 538.702L636.64 540.149L661.64 540.301L686.639 540.453L686.648 539.005L661.648 538.854ZM661.64 540.301L636.642 539.984C636.04 587.508 597.32 625.853 549.648 625.854L549.648 650.854L549.648 675.854C624.722 675.853 685.689 615.471 686.638 540.617L661.64 540.301ZM549.648 650.854L549.8 625.854L548.353 625.845L548.201 650.845L548.049 675.844L549.497 675.853L549.648 650.854ZM548.201 650.845L548.518 625.847C500.994 625.245 462.649 586.525 462.648 538.853L437.648 538.854L412.648 538.854C412.649 613.927 473.031 674.895 547.885 675.843L548.201 650.845ZM437.648 538.854H462.648C462.648 490.805 501.6 451.854 549.648 451.854V426.854V401.854C473.985 401.854 412.648 463.191 412.648 538.854H437.648ZM549.648 426.854L549.497 451.853L550.944 451.862L551.096 426.862L551.248 401.863L549.8 401.854L549.648 426.854ZM276.119 476.871V451.871C228.07 451.871 189.119 490.822 189.119 538.871H214.119H239.119C239.119 518.437 255.685 501.871 276.119 501.871V476.871ZM214.119 538.871H189.119C189.119 586.92 228.07 625.871 276.119 625.871V600.871V575.871C255.685 575.871 239.119 559.306 239.119 538.871H214.119ZM276.119 600.871V625.871C324.168 625.871 363.119 586.92 363.119 538.871H338.119H313.119C313.119 559.306 296.554 575.871 276.119 575.871V600.871ZM338.119 538.871H363.119C363.119 490.822 324.168 451.871 276.119 451.871V476.871V501.871C296.554 501.871 313.119 518.437 313.119 538.871H338.119ZM549.648 476.854V451.854C501.6 451.854 462.648 490.805 462.648 538.853L487.648 538.854L512.648 538.854C512.648 518.419 529.214 501.854 549.648 501.854V476.854ZM487.648 538.854L462.648 538.854C462.649 586.902 501.6 625.854 549.648 625.854V600.854V575.854C529.214 575.854 512.649 559.288 512.648 538.853L487.648 538.854ZM549.648 600.854L549.648 625.854C597.697 625.853 636.648 586.902 636.648 538.854L611.648 538.854L586.648 538.853C586.648 559.288 570.083 575.853 549.648 575.854L549.648 600.854ZM611.648 538.854L636.648 538.853C636.648 490.805 597.697 451.854 549.648 451.854L549.648 476.854L549.648 501.854C570.083 501.854 586.648 518.419 586.648 538.854L611.648 538.854ZM412.569 188L412.569 213C460.618 213 499.569 251.952 499.569 300L524.569 300L549.569 300C549.569 224.337 488.232 163 412.569 163L412.569 188ZM524.569 300H499.569C499.569 348.049 460.618 387 412.569 387L412.569 412L412.569 437C488.232 437 549.569 375.663 549.569 300H524.569ZM412.569 412V387C364.521 387 325.569 348.049 325.569 300H300.569H275.569C275.569 375.663 336.906 437 412.569 437V412ZM300.569 300L325.569 300C325.569 251.951 364.521 213 412.569 213V188V163C336.906 163 275.569 224.337 275.569 300L300.569 300ZM412.569 238V213C364.521 213 325.569 251.951 325.569 300L350.569 300L375.569 300C375.569 279.565 392.135 263 412.569 263V238ZM350.569 300H325.569C325.569 348.049 364.521 387 412.569 387V362V337C392.135 337 375.569 320.435 375.569 300H350.569ZM412.569 362L412.57 387C460.618 387 499.569 348.049 499.569 300H474.569H449.569C449.569 320.434 433.004 337 412.569 337L412.569 362ZM474.569 300L499.569 300C499.569 251.951 460.618 213 412.57 213L412.569 238L412.569 263C433.004 263 449.569 279.566 449.569 300L474.569 300Z" fill="white" mask="url(#path-2-inside-1_52_117)"/> +<defs> +<radialGradient id="paint0_radial_52_117" cx="0" cy="0" r="1" gradientUnits="userSpaceOnUse" gradientTransform="translate(412.884 198.029) rotate(90) scale(807.954 883.425)"> +<stop stop-color="#B53CFD"/> +<stop offset="1" stop-color="#1D2736"/> +</radialGradient> +</defs> +</svg> diff --git a/assets/icons/claude.svg b/assets/icons/claude.svg new file mode 
100644 index 00000000000..e29f3282572 --- /dev/null +++ b/assets/icons/claude.svg @@ -0,0 +1 @@ +<svg fill="currentColor" fill-rule="evenodd" height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Claude \ No newline at end of file diff --git a/assets/icons/dhi.svg b/assets/icons/dhi.svg new file mode 100644 index 00000000000..91d51544005 --- /dev/null +++ b/assets/icons/dhi.svg @@ -0,0 +1,13 @@ + \ No newline at end of file diff --git a/assets/icons/facebook.svg b/assets/icons/facebook.svg new file mode 100644 index 00000000000..e3e31d89316 --- /dev/null +++ b/assets/icons/facebook.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/static/assets/icons/go.svg b/assets/icons/go.svg similarity index 100% rename from static/assets/icons/go.svg rename to assets/icons/go.svg diff --git a/assets/icons/gordon-happy.svg b/assets/icons/gordon-happy.svg new file mode 100644 index 00000000000..072740635b6 --- /dev/null +++ b/assets/icons/gordon-happy.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/assets/icons/gordon.svg b/assets/icons/gordon.svg new file mode 100644 index 00000000000..91442d6a36a --- /dev/null +++ b/assets/icons/gordon.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/assets/icons/headset.svg b/assets/icons/headset.svg new file mode 100644 index 00000000000..700d4c54608 --- /dev/null +++ b/assets/icons/headset.svg @@ -0,0 +1,3 @@ + + + diff --git a/assets/icons/instagram.svg b/assets/icons/instagram.svg new file mode 100644 index 00000000000..d0acf82f83c --- /dev/null +++ b/assets/icons/instagram.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/static/assets/icons/java.svg b/assets/icons/java.svg similarity index 100% rename from static/assets/icons/java.svg rename to assets/icons/java.svg diff --git a/assets/icons/linkedin.svg b/assets/icons/linkedin.svg new file mode 100644 index 00000000000..997fa8bb51f --- /dev/null +++ b/assets/icons/linkedin.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/static/assets/images/logo-build-cloud.svg b/assets/icons/logo-build-cloud.svg similarity index 100% rename from static/assets/images/logo-build-cloud.svg rename to assets/icons/logo-build-cloud.svg diff --git a/assets/icons/models.svg b/assets/icons/models.svg new file mode 100644 index 00000000000..581f3621afb --- /dev/null +++ b/assets/icons/models.svg @@ -0,0 +1,3 @@ + + + diff --git a/assets/icons/moon.svg b/assets/icons/moon.svg new file mode 100644 index 00000000000..0bc248ff514 --- /dev/null +++ b/assets/icons/moon.svg @@ -0,0 +1,8 @@ + + + + + + + diff --git a/assets/icons/openai.svg b/assets/icons/openai.svg new file mode 100644 index 00000000000..50d94d6c108 --- /dev/null +++ b/assets/icons/openai.svg @@ -0,0 +1 @@ +OpenAI \ No newline at end of file diff --git a/assets/icons/search.svg b/assets/icons/search.svg new file mode 100644 index 00000000000..4f8a0b6c017 --- /dev/null +++ b/assets/icons/search.svg @@ -0,0 +1,8 @@ + + + + + + + diff --git a/assets/icons/sparkle.svg b/assets/icons/sparkle.svg new file mode 100644 index 00000000000..97ce7a10cbf --- /dev/null +++ b/assets/icons/sparkle.svg @@ -0,0 +1,8 @@ + + + + + + + diff --git a/assets/icons/sun.svg b/assets/icons/sun.svg new file mode 100644 index 00000000000..5eb18a1d1d7 --- /dev/null +++ b/assets/icons/sun.svg @@ -0,0 +1,8 @@ + + + + + + + diff --git a/assets/icons/toolkit.svg b/assets/icons/toolkit.svg new file mode 100644 index 00000000000..ef4c016dc5c --- /dev/null +++ b/assets/icons/toolkit.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git 
a/assets/icons/twitter.svg b/assets/icons/twitter.svg new file mode 100644 index 00000000000..67893368f73 --- /dev/null +++ b/assets/icons/twitter.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/assets/icons/youtube.svg b/assets/icons/youtube.svg new file mode 100644 index 00000000000..86a34ce77f0 --- /dev/null +++ b/assets/icons/youtube.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/assets/images/FacebookCircle.svg b/assets/images/FacebookCircle.svg deleted file mode 100644 index d6ad69f938e..00000000000 --- a/assets/images/FacebookCircle.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/images/InstagramCircle.svg b/assets/images/InstagramCircle.svg deleted file mode 100644 index 7021c0c0f5e..00000000000 --- a/assets/images/InstagramCircle.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/images/LinkedinCircle.svg b/assets/images/LinkedinCircle.svg deleted file mode 100644 index a1d860f7f59..00000000000 --- a/assets/images/LinkedinCircle.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/images/TwitterCircle.svg b/assets/images/TwitterCircle.svg deleted file mode 100644 index 9eac86eefeb..00000000000 --- a/assets/images/TwitterCircle.svg +++ /dev/null @@ -1 +0,0 @@ - diff --git a/assets/images/YoutubeCircle.svg b/assets/images/YoutubeCircle.svg deleted file mode 100644 index 23678de57f9..00000000000 --- a/assets/images/YoutubeCircle.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/images/ai-stars.svg b/assets/images/ai-stars.svg deleted file mode 100644 index b7c6a9085c0..00000000000 --- a/assets/images/ai-stars.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - - - - - - - diff --git a/static/assets/images/logo-icon-white.svg b/assets/images/ask-ai-logo.svg similarity index 100% rename from static/assets/images/logo-icon-white.svg rename to assets/images/ask-ai-logo.svg diff --git a/assets/images/gordon-logo.svg b/assets/images/gordon-logo.svg new file mode 100644 index 00000000000..d19e48fa61c --- /dev/null +++ b/assets/images/gordon-logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/images/gordon-robot.svg b/assets/images/gordon-robot.svg new file mode 100644 index 00000000000..9b315eadeba --- /dev/null +++ b/assets/images/gordon-robot.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/images/guides/dhi-examples-go.webp b/assets/images/guides/dhi-examples-go.webp new file mode 100644 index 00000000000..3f5a440739e Binary files /dev/null and b/assets/images/guides/dhi-examples-go.webp differ diff --git a/assets/images/guides/dhi-examples-nodejs.webp b/assets/images/guides/dhi-examples-nodejs.webp new file mode 100644 index 00000000000..e7627866afc Binary files /dev/null and b/assets/images/guides/dhi-examples-nodejs.webp differ diff --git a/assets/images/guides/dhi-examples-python.webp b/assets/images/guides/dhi-examples-python.webp new file mode 100644 index 00000000000..4326509bc11 Binary files /dev/null and b/assets/images/guides/dhi-examples-python.webp differ diff --git a/assets/images/guides/dhi-migrate-doi.webp b/assets/images/guides/dhi-migrate-doi.webp new file mode 100644 index 00000000000..1157dababe4 Binary files /dev/null and b/assets/images/guides/dhi-migrate-doi.webp differ diff --git a/assets/images/guides/dhi-migrate-wolfi.webp b/assets/images/guides/dhi-migrate-wolfi.webp new file mode 100644 index 00000000000..b6a0dd0c63f Binary files /dev/null and b/assets/images/guides/dhi-migrate-wolfi.webp differ 
diff --git a/assets/images/guides/ros2.jpg b/assets/images/guides/ros2.jpg
new file mode 100644
index 00000000000..98d697619ef
Binary files /dev/null and b/assets/images/guides/ros2.jpg differ
diff --git a/assets/images/search-ai.svg b/assets/images/search-ai.svg
deleted file mode 100644
index a898a28113c..00000000000
--- a/assets/images/search-ai.svg
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-
-
-
-
-
-
-
diff --git a/assets/js/src/alpine.js b/assets/js/src/alpine.js
index cdcdfeb9305..c314dc641cd 100644
--- a/assets/js/src/alpine.js
+++ b/assets/js/src/alpine.js
@@ -2,12 +2,110 @@ import Alpine from 'alpinejs'
 import collapse from '@alpinejs/collapse'
 import persist from '@alpinejs/persist'
 import focus from '@alpinejs/focus'
-
+import { marked } from 'marked'
+import hljs from 'highlight.js/lib/core'
+// Import languages relevant to Docker docs
+import bash from 'highlight.js/lib/languages/bash'
+import dockerfile from 'highlight.js/lib/languages/dockerfile'
+import yaml from 'highlight.js/lib/languages/yaml'
+import json from 'highlight.js/lib/languages/json'
+import javascript from 'highlight.js/lib/languages/javascript'
+import python from 'highlight.js/lib/languages/python'
+import go from 'highlight.js/lib/languages/go'
+
 window.Alpine = Alpine
 Alpine.plugin(collapse)
 Alpine.plugin(persist)
 Alpine.plugin(focus)
+
+// Register highlight.js languages
+hljs.registerLanguage('bash', bash)
+hljs.registerLanguage('sh', bash)
+hljs.registerLanguage('shell', bash)
+hljs.registerLanguage('console', bash)
+hljs.registerLanguage('dockerfile', dockerfile)
+hljs.registerLanguage('yaml', yaml)
+hljs.registerLanguage('yml', yaml)
+hljs.registerLanguage('json', json)
+hljs.registerLanguage('javascript', javascript)
+hljs.registerLanguage('js', javascript)
+hljs.registerLanguage('python', python)
+hljs.registerLanguage('py', python)
+hljs.registerLanguage('go', go)
+hljs.registerLanguage('golang', go)
+
+// Configure marked to escape HTML in text tokens only (not code blocks)
+marked.use({
+  walkTokens(token) {
+    // Escape HTML in text and HTML tokens, preserve code blocks
+    if (token.type === 'text' || token.type === 'html') {
+      const text = token.text || token.raw
+      const escaped = text
+        .replace(/&/g, '&amp;')
+        .replace(/</g, '&lt;')
+        .replace(/>/g, '&gt;')
+      if (token.text) token.text = escaped
+      if (token.raw) token.raw = escaped
+    }
+  }
+})
+
+// Add $markdown magic for rendering markdown with syntax highlighting
+Alpine.magic('markdown', () => {
+  return (content) => {
+    if (!content) return ''
+    const html = marked(content)
+
+    // Parse and highlight code blocks
+    const div = document.createElement('div')
+    div.innerHTML = html
+
+    // Handle code blocks (pre > code)
+    div.querySelectorAll('pre').forEach((pre) => {
+      // Add not-prose to prevent Tailwind Typography styling
+      pre.classList.add('not-prose')
+      const code = pre.querySelector('code')
+      if (code) {
+        // Preserve the original text with newlines
+        const codeText = code.textContent
+
+        // Clear and set as plain text first to preserve structure
+        code.textContent = codeText
+
+        // Now apply highlight.js which will work with the text nodes
+        hljs.highlightElement(code)
+      }
+    })
+
+    // Handle inline code elements (not in pre blocks)
+    div.querySelectorAll('code:not(pre code)').forEach((code) => {
+      code.classList.add('not-prose')
+    })
+
+    return div.innerHTML
+  }
+})
+
+// Stores Alpine.store("showSidebar", false)
+Alpine.store('gordon', {
+  isOpen: false,
+  query: '',
+  autoSubmit: false,
+  toggle() {
+    this.isOpen = !this.isOpen
+  },
+  open(query, autoSubmit = false) {
this.isOpen = true + if (query) { + this.query = query + this.autoSubmit = autoSubmit + } + }, + close() { + this.isOpen = false + } +}) Alpine.start() diff --git a/assets/js/theme.js b/assets/js/theme.js index 95faa9a8f3a..27852bd5c02 100644 --- a/assets/js/theme.js +++ b/assets/js/theme.js @@ -1,20 +1,10 @@ -// return 'light' or 'dark' depending on localStorage (pref) or system setting -function getThemePreference() { - const theme = localStorage.getItem("theme-preference"); - if (theme) return theme; - else - return window.matchMedia("(prefers-color-scheme: dark)").matches +// update root class based on os setting or localstorage +const storedTheme = localStorage.getItem("theme-preference"); +const prefersDark = window.matchMedia("(prefers-color-scheme: dark)").matches; +document.firstElementChild.className = + storedTheme === "dark" || storedTheme === "light" + ? storedTheme + : prefersDark ? "dark" : "light"; -} - -// update root class based on os setting or localstorage -const preference = getThemePreference(); -document.firstElementChild.className = preference === "dark" ? "dark" : "light"; -localStorage.setItem("theme-preference", preference); - -// set innertext for the theme switch button -// window.addEventListener("DOMContentLoaded", () => { -// const themeSwitchButton = document.querySelector("#theme-switch"); -// themeSwitchButton.textContent = `${preference}_mode`; -// }); +document.firstElementChild.dataset.themePreference = storedTheme || "auto"; diff --git a/assets/theme/icons/edit.svg b/assets/theme/icons/edit.svg new file mode 100644 index 00000000000..2ee5ec5d2be --- /dev/null +++ b/assets/theme/icons/edit.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/assets/theme/icons/issue.svg b/assets/theme/icons/issue.svg new file mode 100644 index 00000000000..eef2863fdf5 --- /dev/null +++ b/assets/theme/icons/issue.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/content/_index.md b/content/_index.md index 7c7a6e3641a..e78e0a1cc0e 100644 --- a/content/_index.md +++ b/content/_index.md @@ -2,137 +2,33 @@ title: Home description: Docker Documentation is the official Docker library of resources, manuals, and guides to help you containerize applications. keywords: Docker, documentation, manual, guide, reference, api, samples -grid: - - title: Docker Desktop - icon: computer - description: | - Manage containers, applications, and images directly from your machine. - links: - - text: "Overview" - url: "/desktop/" - - text: "Explore Docker Desktop" - url: "/desktop/use-desktop/" - - text: "Release notes" - url: "/desktop/release-notes/" - - title: Docker Engine - icon: developer_board - description: | - The definitive open source container client and runtime. - links: - - text: "Overview" - url: "/engine/" - - text: "Install" - url: "/engine/install/" - - text: "Release notes" - url: "/engine/release-notes/" - - title: Docker Build - icon: build - description: | - Package, test, and ship your applications. - links: - - text: "Overview" - url: "/build/" - - text: "Packaging your software" - url: "/build/building/packaging/" - - text: "Release notes" - url: "/build/release-notes/" - - title: Docker Build Cloud - icon: cloud - description: | - Run your builds in the cloud. - links: - - text: "Overview" - url: "/build-cloud/" - - text: "Setup" - url: "/build-cloud/setup/" - - text: "Release notes" - url: "/build-cloud/release-notes/" - - title: Docker Compose - icon: polyline - description: | - Define and run multi-container applications with Docker. 
- links: - - text: "Overview" - url: "/compose/" - - text: "Try Docker Compose" - url: "/compose/gettingstarted/" - - text: "Release notes" - url: "/compose/releases/release-notes/" - - title: Docker Hub - icon: device_hub - description: | - Find and share container images and other artifacts. - links: - - text: "Overview" - url: "/docker-hub/" - - text: "Create an account" - url: "/accounts/create-account/" - - text: "Create a repository" - url: "/docker-hub/repos/create/" - - title: Docker Scout - icon: query_stats - description: | - Strengthen your software supply chain with Docker Scout. - links: - - text: "Overview" - url: "/scout/" - - text: "Quickstart" - url: "/scout/quickstart/" - - text: "Image analysis" - url: "/scout/image-analysis/" - - title: Subscription - icon: card_membership - description: | - Licensing for commercial use of Docker components. - links: - - text: "Overview" - url: "/subscription/" - - text: "Subscriptions and features" - url: "/subscription/details/" - - text: "Change subscription" - url: "/subscription/change/" - - title: Billing - icon: payments - description: | - Manage your billing and payment settings for your subscription. - links: - - text: "Overview" - url: "/billing/" - - text: "Update payment method" - url: "/billing/payment-method/" - - text: "View billing history" - url: "/billing/history/" - - title: Administration - icon: admin_panel_settings - description: | - Manage company and organization users, permissions, and more. - links: - - text: "Overview" - url: "/admin/company/" - - text: "Organization administration" - url: "/admin/organization/" - - text: "Company administration" - url: "/admin/company/" - - title: Security - icon: shield - description: | - Security guardrails for both administrators and developers. - links: - - text: "Overview" - url: "/security/" - - text: "SSO" - url: "/security/for-admins/single-sign-on/" - - text: "SCIM" - url: "/security/for-admins/provisioning/scim/" - - title: Testcontainers Cloud - icon: cloud - description: | - Testcontainers Cloud lets you run heavy test workloads remotely. - links: - - text: "Overview" - url: "https://testcontainers.com/cloud/docs/" - - text: "Getting started" - url: "https://testcontainers.com/cloud/docs/#getting-started" - - text: "TCC for CI" - url: "https://testcontainers.com/cloud/docs/#tcc-for-ci" +aliases: + - /search/ --- + +Docker Documentation helps you learn Docker, install Docker products, and find +reference material for everyday development and operations tasks. + +## Browse docs by area + +- [Get started](/get-started/): Learn Docker basics and core concepts. +- [Guides](/guides/): Follow task-focused walkthroughs for common workflows. +- [Manuals](/manuals/): Install, configure, and use Docker products. +- [Reference](/reference/): Browse CLI, API, and file format documentation. 
+ +## Featured topics + +- [Docker Hardened Images](/dhi/) +- [Get started with Docker Sandboxes](/ai/sandboxes/get-started/) +- [Docker Desktop overview](/desktop/) +- [Install Docker Engine](/engine/install/) +- [Dockerfile reference](/reference/dockerfile/) +- [Docker Build overview](/build/) + +## Common questions + +- [How do I get started with Docker?](/get-started/docker-overview/) +- [Can I run my AI agent in a sandbox?](/ai/sandboxes/get-started/) +- [What is a container?](/get-started/docker-concepts/the-basics/what-is-a-container/) +- [What are Docker Hardened Images?](/dhi/) +- [Why should I use Docker Compose?](/compose/) diff --git a/content/contribute/_index.md b/content/contribute/_index.md deleted file mode 100644 index 3801814cd10..00000000000 --- a/content/contribute/_index.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Contribute to Docker's docs -linkTitle: Contribute -weight: 10 -toc_max: 1 -aliases: -- /opensource/ -- /contribute/overview/ -- /contribute/contribute-guide/ -grid: -- title: Grammar and style - description: Explore Docker's grammar and style guide - icon: menu_book - link: /contribute/style/grammar -- title: Formatting - description: Format your content to match the rest of our documentation. - icon: newspaper - link: /contribute/style/formatting -- title: Recommended word list - description: Choose the right words for your content. - icon: checklist - link: /contribute/style/recommended-words -- title: Source file conventions - description: Guidelines for creating a new page. - icon: note_add - link: /contribute/file-conventions -- title: Terminology - description: Explore commonly used Docker terms. - icon: spellcheck - link: /contribute/style/terminology -- title: Voice and tone - description: Learn about how we use voice and tone in our writing. - icon: voice_selection - link: /contribute/style/voice-tone ---- - -We value documentation contributions from the Docker community. We'd like to -make it as easy as possible for you to contribute to the Docker documentation. - -Find the contribution guidelines in -[CONTRIBUTING.md](https://github.com/docker/docs/blob/main/CONTRIBUTING.md) in -the `docker/docs` GitHub repository. Use the following links to review our -style guide and instructions on how to use our page templates and components. - -{{< grid >}} - -### Additional resources - -See also: - -- A section of useful components you can add to your documentation. -- Information on Docker's [tone and voice](style/voice-tone.md). -- A [writing checklist](checklist.md) to help you when you're contributing to Docker's documentation. diff --git a/content/contribute/checklist.md b/content/contribute/checklist.md deleted file mode 100644 index e9e8af6fead..00000000000 --- a/content/contribute/checklist.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Writing checklist -description: A helpful writing checklist when creating documentation -keywords: checklist, documentation, style guide, contribute -weight: 60 ---- - -Use this checklist to communicate in a way that is clear, helpful, and consistent with the rest of Docker Docs. - -##### Used active voice - -Active voice is specific and removes ambiguity. - -In active voice, the subject of the sentence (the customer or the system) does the action. - -Sentences that use active voice are easier to read. Active voice makes it clear who has done what and to whom. Plus, active voice keeps sentences direct and more concise. - -Helping verbs such as is, was, or would may indicate that you're writing in passive voice. 
And, if you can add the phrase 'by zombies' after the verb, you're writing in the passive voice. - -|Correct| Incorrect| -|:--|:--| -|Increase productivity with Docker Desktop.| Productivity can be increased (by zombies) with Docker Desktop.| -|If you remove items from a grid, charts automatically refresh to display the change. | If items are removed (by zombies) from a grid, charts automatically refresh to display the change.| - -##### Written clear sentences that get to the point - -Write short, concise sentences. Punchy sentences are faster to read and easier to understand. - -##### Used subheadings and bulleted lists to break up the page - -This helps find the information they need quickly and easily. - -For more information, see the [formatting](style/formatting.md#headings-and-subheadings) page, or see the [components](components/lists.md) for examples. - -##### Started the title of your page with a verb - -For example, 'Install Docker Desktop'. - -##### Checked that the left-hand table of contents title in docs.docker.com, matches the title displayed on the page - -##### Checked for broken links and images - -Use relative links to link to other pages or images within the GitHub repository. - -For more information, see the [formatting](style/formatting.md#links) page, or see the [components](components/links.md) for examples. - -##### Checked that any redirects you may have added, work - -For information on how to add redirects, see [Source file conventions](file-conventions.md#front-matter). - -##### Used bold on any UI elements you refer to in your content - -##### Completed a final spelling, punctuation, and grammar check - -For more in-depth information on our Style Guide, explore the [grammar](style/grammar.md) or [formatting](style/formatting.md) guides. diff --git a/content/contribute/components/_index.md b/content/contribute/components/_index.md deleted file mode 100644 index 3c3ff89b1a6..00000000000 --- a/content/contribute/components/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -build: - render: never -title: Useful components -weight: 50 ---- \ No newline at end of file diff --git a/content/contribute/components/accordions.md b/content/contribute/components/accordions.md deleted file mode 100644 index c88cf9bdaeb..00000000000 --- a/content/contribute/components/accordions.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -description: components and formatting examples used in Docker's docs -title: Accordions -toc_max: 3 ---- - -## Example - -{{< accordion title="Accordion example" >}} - -```console -$ docker run hello-world -``` - -{{< /accordion >}} - -## Markup - -````markdown -{{}} - -```console -$ docker run hello-world -``` - -{{}} -```` diff --git a/content/contribute/components/badges.md b/content/contribute/components/badges.md deleted file mode 100644 index db1cedf36c2..00000000000 --- a/content/contribute/components/badges.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -description: components and formatting examples used in Docker's docs -title: Badges -toc_max: 3 ---- - -### Examples - -{{< badge color=blue text="blue badge" >}} -{{< badge color=amber text="amber badge" >}} -{{< badge color=red text="red badge" >}} -{{< badge color=green text="green badge" >}} -{{< badge color=violet text="violet badge" >}} - -You can also make a badge a link. 
- -[{{< badge color="blue" text="badge with a link" >}}](../_index.md) - -### Usage guidelines - -We use badges to indicate new content and product content in various stages of the release lifecycle: - -- The violet badge to highlight new early access or experimental content -- The blue badge to highlight beta content -- The green badge to highlight new content that is either GA or not product-related content, such as guides/learning paths - -Best practice is to use this badge for no longer than 2 months post release of the feature. - -### Markup - -```go -{{}} -[{{}}](../overview.md) -``` diff --git a/content/contribute/components/buttons.md b/content/contribute/components/buttons.md deleted file mode 100644 index 7e7aed3fdb6..00000000000 --- a/content/contribute/components/buttons.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: components and formatting examples used in Docker's docs -title: Buttons -toc_max: 3 ---- - -### Examples - -{{< button url="https://example.com/" text="hello" >}} - -### Markup - -```go -{{}} -``` diff --git a/content/contribute/components/call-outs.md b/content/contribute/components/call-outs.md deleted file mode 100644 index 231e896d2b2..00000000000 --- a/content/contribute/components/call-outs.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -description: components and formatting examples used in Docker's docs -title: Callouts -toc_max: 3 ---- - -We support these broad categories of callouts: - -- Alerts: Note, Tip, Important, Warning, Caution - -We also support summary bars, which represent a feature's required subscription, version, or Adminstrator role. -To add a summary bar: - -Add the feature name to the `/data/summary.yaml` file. Use the following attributes: - -| Attribute | Description | Possible values | -|----------------|--------------------------------------------------------|---------------------------------------------------------| -| `subscription` | Notes the subscription required to use the feature | All, Personal, Pro, Team, Business | -| `availability` | Notes what product development stage the feature is in | Experimental, Beta, Early Access, GA, Retired | -| `requires` | Notes what minimum version is required for the feature | No specific value, use a string to describe the version and link to relevant release notes | -| `for` | Notes if the feature is intended for IT Administrators | Administrators | - -Then, add the `summary-bar` shortcode on the page you want to add the summary bar to. Note, the feature name is case sensitive. The icons that appear in the summary bar are automatically rendered. - -## Examples - -{{< summary-bar feature_name="PKG installer" >}} - -> [!NOTE] -> -> Note the way the `get_hit_count` function is written. This basic retry -> loop lets us attempt our request multiple times if the redis service is -> not available. This is useful at startup while the application comes -> online, but also makes our application more resilient if the Redis -> service needs to be restarted anytime during the app's lifetime. In a -> cluster, this also helps handling momentary connection drops between -> nodes. - -> [!TIP] -> -> For a smaller base image, use `alpine`. - -> [!IMPORTANT] -> -> Treat access tokens like your password and keep them secret. Store your -> tokens securely (for example, in a credential manager). - -> [!WARNING] -> -> Removing Volumes -> -> By default, named volumes in your compose file are NOT removed when running -> `docker compose down`. 
If you want to remove the volumes, you will need to add -> the `--volumes` flag. -> -> The Docker Desktop Dashboard does not remove volumes when you delete the app stack. - -> [!CAUTION] -> -> Here be dragons. - -For both of the following callouts, consult [the Docker release lifecycle](/release-lifecycle) for more information on when to use them. - -## Formatting - -```md -{{}} -``` - -```html -> [!NOTE] -> -> Note the way the `get_hit_count` function is written. This basic retry -> loop lets us attempt our request multiple times if the redis service is -> not available. This is useful at startup while the application comes -> online, but also makes our application more resilient if the Redis -> service needs to be restarted anytime during the app's lifetime. In a -> cluster, this also helps handling momentary connection drops between -> nodes. - -> [!TIP] -> -> For a smaller base image, use `alpine`. - -> [!IMPORTANT] -> -> Treat access tokens like your password and keep them secret. Store your -> tokens securely (for example, in a credential manager). - -> [!WARNING] -> -> Removing Volumes -> -> By default, named volumes in your compose file are NOT removed when running -> `docker compose down`. If you want to remove the volumes, you will need to add -> the `--volumes` flag. -> -> The Docker Desktop Dashboard does not remove volumes when you delete the app stack. - -> [!CAUTION] -> -> Here be dragons. -``` \ No newline at end of file diff --git a/content/contribute/components/cards.md b/content/contribute/components/cards.md deleted file mode 100644 index 0dc0f009cff..00000000000 --- a/content/contribute/components/cards.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -description: components and formatting examples used in Docker's docs -title: Cards -toc_max: 3 -grid: -- title: Docker Desktop - description: Docker on your Desktop. - icon: install_desktop - link: /desktop/ -- title: Docker Engine - description: Vrrrrooooommm - icon: developer_board - link: /engine/ -- title: Docker Build - description: Clang bang - icon: build - link: /build/ -- title: Docker Compose - description: Figgy! - icon: account_tree - link: /compose/ -- title: Docker Hub - description: so much content, oh wow - icon: hub - link: /docker-hub/ ---- - -Cards can be added to a page using the `card` shortcode. -The parameters for this shortcode are: - -| Parameter | Description | -| ----------- | -------------------------------------------------------------------- | -| title | The title of the card | -| icon | The icon slug of the card | -| image | Use a custom image instead of an icon (mutually exclusive with icon) | -| link | (Optional) The link target of the card, when clicked | -| description | A description text, in Markdown | - -> [!NOTE] -> -> There's a known limitation with the Markdown description of cards, -> in that they can't contain relative links, pointing to other .md documents. -> Such links won't render correctly. Instead, use an absolute link to the URL -> path of the page that you want to link to. -> -> For example, instead of linking to `../install/linux.md`, write: -> `/engine/install/linux/`. - -## Example - -{{< card - title="Get your Docker on" - icon=favorite - link=https://docs.docker.com/ - description="Build, share, and run your apps with Docker" >}} - -## Markup - -```go -{{}} -``` - -### Grid - -There's also a built-in `grid` shortcode that generates a... well, grid... of cards. -The grid size is 3x3 on large screens, 2x2 on medium, and single column on small. 
- -{{< grid >}} - -Grid is a separate shortcode from `card`, but it implements the same component under the hood. -The markup you use to insert a grid is slightly unconventional. The grid shortcode takes no arguments. -All it does is let you specify where on a page you want your grid to appear. - -```go -{{}} -``` - -The data for the grid is defined in the front matter of the page, under the `grid` key, as follows: - -```yaml -# front matter section of a page -title: some page -grid: - - title: "Docker Engine" - description: Vrrrrooooommm - icon: "developer_board" - link: "/engine/" - - title: "Docker Build" - description: Clang bang - icon: "build" - link: "/build/" -``` diff --git a/content/contribute/components/code-blocks.md b/content/contribute/components/code-blocks.md deleted file mode 100644 index 7df04769a2b..00000000000 --- a/content/contribute/components/code-blocks.md +++ /dev/null @@ -1,244 +0,0 @@ ---- -description: components and formatting examples used in Docker's docs -title: Code blocks -toc_max: 3 ---- - -Rouge provides lots of different code block "hints". If you leave off the hint, -it tries to guess and sometimes gets it wrong. These are just a few hints that -we use often. - -## Variables - -If your example contains a placeholder value that's subject to change, -use the format `<[A-Z_]+>` for the placeholder value: `` - -```none -export name= -``` - -This syntax is reserved for variable names, and will cause the variable to -be rendered in a special color and font style. - -## Highlight lines - -```text {hl_lines=[7,8]} -incoming := map[string]interface{}{ - "asdf": 1, - "qwer": []interface{}{}, - "zxcv": []interface{}{ - map[string]interface{}{}, - true, - int(1e9), - "tyui", - }, -} -``` - -````markdown -```go {hl_lines=[7,8]} -incoming := map[string]interface{}{ - "asdf": 1, - "qwer": []interface{}{}, - "zxcv": []interface{}{ - map[string]interface{}{}, - true, - int(1e9), - "tyui", - }, -} -``` -```` - -## Collapsible code blocks - -```dockerfile {collapse=true} -# syntax=docker/dockerfile:1 - -ARG GO_VERSION="1.21" - -FROM golang:${GO_VERSION}-alpine AS base -ENV CGO_ENABLED=0 -ENV GOPRIVATE="github.com/foo/*" -RUN apk add --no-cache file git rsync openssh-client -RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan github.com >> ~/.ssh/known_hosts -WORKDIR /src - -FROM base AS vendor -# this step configure git and checks the ssh key is loaded -RUN --mount=type=ssh < - - -Welcome to nginx! 
- - -``` - -## Markdown - -```markdown -# Hello -``` - -If you want to include a triple-fenced code block inside your code block, -you can wrap your block in a quadruple-fenced code block: - -`````markdown -````markdown -# Hello - -```go -log.Println("did something") -``` -```` -````` - -## ini - -```ini -[supervisord] -nodaemon=true - -[program:sshd] -command=/usr/sbin/sshd -D -``` - -## Dockerfile - -```dockerfile -# syntax=docker/dockerfile:1 - -FROM ubuntu - -RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 - -RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list - -RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 - -# Note: The official Debian and Ubuntu images automatically ``apt-get clean`` -# after each ``apt-get`` - -USER postgres - -RUN /etc/init.d/postgresql start &&\ - psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\ - createdb -O docker docker - -RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf - -RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf - -EXPOSE 5432 - -VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"] - -CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"] -``` - -## YAML - -```yaml -authorizedkeys: - image: dockercloud/authorizedkeys - deployment_strategy: every_node - autodestroy: always - environment: - - AUTHORIZED_KEYS=ssh-rsa AAAAB3Nsomelongsshkeystringhereu9UzQbVKy9o00NqXa5jkmZ9Yd0BJBjFmb3WwUR8sJWZVTPFL - volumes: - /root:/user:rw -``` diff --git a/content/contribute/components/icons.md b/content/contribute/components/icons.md deleted file mode 100644 index b89533075d2..00000000000 --- a/content/contribute/components/icons.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -description: Icons used across docs -title: Icons -grid: - - title: "Install" - icon: "download" - description: Icon name = download - - title: "FAQs" - icon: "help" - description: Icon name = help - - title: "Onboarding/quickstarts" - icon: "explore" - description: Icon name = explore - - title: "Release notes" - icon: "note_add" - description: Icon name = note_add - - title: "Feedback" - icon: "sms" - description: Icon name = sms - - title: "Multi-platform/arch" - icon: "content_copy" - description: Icon name = content_copy - - title: "Rootless/ECI" - icon: "security" - description: Icon name = security - - title: "Settings management" - icon: "shield_lock" - description: Icon name = shield_lock - - title: "Processes" - icon: "checklist" - description: Icon name = checklist - - title: "Networking" - icon: "network_node" - description: Icon name = network_node - - title: "Exploring a feature" - icon: "feature_search" - description: Icon name = feature_search - - title: "Company" - icon: "apartment" - description: Icon name = apartment - - title: "Organization" - icon: "store" - description: Icon name = store - - title: "Additional resources" - icon: "category" - description: Icon name = category - - title: "Designing" - icon: "design_services" - description: Icon name = design_services - - title: "Publishing" - icon: "publish" - description: Icon name = publish - - title: "Interacting" - icon: "multiple_stop" - description: Icon name = multiple_stop - - title: "Storage" - icon: 
"database" - description: Icon name = database - - title: "logs" - icon: "text_snippet" - description: Icon name = text_snippet - - title: "Prune/cut" - icon: "content_cut" - description: Icon name = content_cut - - title: "Configure" - icon: "tune" - description: Icon name = tune - - title: "Deprecated" - icon: "folder_delete" - description: Icon name = folder_delete - - title: "RAM" - icon: "home_storage" - description: Icon name = home_storage - - title: "IAM" - icon: "photo_library" - description: Icon name = photo_library - - title: "Packaging" - icon: "inventory_2" - description: Icon name = inventory_2 - - title: "Multi-stage" - icon: "stairs" - description: Icon name = stairs - - title: "Architecture" - icon: "construction" - description: Icon name = construction - - title: "Build drivers" - icon: "engineering" - description: Icon name = engineering - - title: "Export" - icon: "output" - description: Icon name = output - - title: "Cache" - icon: "cycle" - description: Icon name = cycle - - title: "Bake" - icon: "cake" - description: Icon name = cake - - title: "Docker ID" - icon: "fingerprint" - description: Icon name = fingerprint - - title: "Repository" - icon: "inbox" - description: Icon name = inbox - - title: "Access tokens" - icon: "password" - description: Icon name = password - - title: "official images" - icon: "verified" - description: Icon name = verified - - title: "Hardened Docker Desktop" - icon: "lock" - description: Icon name = lock - - title: "Sign in" - icon: "passkey" - description: Icon name = passkey - - title: "SSO and SCIM" - icon: "key" - description: Icon name = key - - title: "2FA" - icon: "phonelink_lock" - description: Icon name = phonelink_lock - - title: "Add/update payment method" - icon: "credit_score" - description: Icon name = credit_score - - title: "Update billing info" - icon: "contract_edit" - description: Icon name = contract_edit - - title: "Billing history" - icon: "payments" - description: Icon name = payments - - title: "Upgrade" - icon: "upgrade" - description: Icon name = upgrade - - title: "Add/manage more seats/users" - icon: "group_add" - description: Icon name = group_add - - title: "Domains" - icon: "domain_verification" - description: Icon name = domain_verification - - title: Company owner - description: Icon name = supervised_user_circle - icon: supervised_user_circle - - title: "General settings" - icon: "settings" - description: Icon name = settings ---- - -Below is an inventory of the icons we use to represent different topics or features across docs. To be used with the [cards component](cards.md). 
- -{{< grid >}} - diff --git a/content/contribute/components/images.md b/content/contribute/components/images.md deleted file mode 100644 index 305334c283e..00000000000 --- a/content/contribute/components/images.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -description: components and formatting examples used in Docker's docs -title: Images -toc_max: 3 ---- - -## Example - -- A small image: ![a small image](/assets/images/footer_moby_icon.png) - -- Large images occupy the full width of the reading column by default: - - ![a pretty wide image](/assets/images/banner_image_24512.png) - -- Image size can be set using query parameters: `?h=&w=` - - ![a pretty wide image](/assets/images/banner_image_24512.png?w=100&h=50) - -- Image with a border, also set with a query parameter: `?border=true` - - ![a small image](/assets/images/footer_moby_icon.png?border=true) - - -## HTML and Markdown - -```markdown -- A small image: ![a small image](/assets/images/footer_moby_icon.png) - -- Large images occupy the full width of the reading column by default: - - ![a pretty wide image](/assets/images/banner_image_24512.png) - -- Image size can be set using query parameters: `?h=&w=` - - ![a pretty wide image](/assets/images/banner_image_24512.png?w=100&h=50) - -- Image with a border, also set with a query parameter: `?border=true` - - ![a small image](/assets/images/footer_moby_icon.png?border=true) -``` diff --git a/content/contribute/components/links.md b/content/contribute/components/links.md deleted file mode 100644 index 97f47fbd4fc..00000000000 --- a/content/contribute/components/links.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -description: components and formatting examples used in Docker's docs -title: Links -toc_max: 3 ---- - -## Examples - -- [External links](https://docker.com) open in a new tab -- [Internal links](links.md) open in the same tab - -You can use relative links, using source filenames, -or you can use absolute links for pages as they appear on the final site. - -#### Links to auto-generated content - -When you link to heading IDs in auto-generated pages, such as CLI reference content, -you won't get any help from your editor in resolving the anchor names. That's -because the pages are generated at build-time and your editor or LSP doesn't know -about them in advance. - -## Syntax - -```md -[External links](https://docker.com) -[Internal links](links.md) -``` diff --git a/content/contribute/components/lists.md b/content/contribute/components/lists.md deleted file mode 100644 index 551b47af889..00000000000 --- a/content/contribute/components/lists.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -description: components and formatting examples used in Docker's docs -title: Lists -toc_max: 3 ---- - -## Examples - -Use dashes (`-`) or asterisks (`*`) for bullet points. - -- Bullet list item 1 -- Bullet list item 2 -- Bullet list item 3 - -1. Numbered list item 1. Two spaces between the period and the first letter - helps with alignment. - -2. Numbered list item 2. Let's put a note in it. - - > [!NOTE]: We did it! - -3. Numbered list item 3 with a code block in it. You need the blank line before - the code block happens. - - ```bash - $ docker run hello-world - ``` - -4. Numbered list item 4 with a bullet list inside it and a numbered list - inside that. - - - Sub-item 1 - - Sub-item 2 - - 1. Sub-sub-item 1 - 2. Sub-sub-item-2 with a table inside it because we like to party! - Indentation is super important. 
- - | Header 1 | Header 2 | - | -------- | -------- | - | Thing 1 | Thing 2 | - | Thing 3 | Thing 4 | - -## Markdown - -````md -- Bullet list item 1 -- Bullet list item 2 -- Bullet list item 3 - -1. Numbered list item 1. Two spaces between the period and the first letter - helps with alignment. - -2. Numbered list item 2. Let's put a note in it. - - > [!NOTE]: We did it! - -3. Numbered list item 3 with a code block in it. You need the blank line before - the code block happens. - - ```bash - $ docker run hello-world - ``` - -4. Numbered list item 4 with a bullet list inside it and a numbered list - inside that. - - - Sub-item 1 - - Sub-item 2 - - 1. Sub-sub-item 1 - 2. Sub-sub-item-2 with a table inside it. - Indentation is super important. - - | Header 1 | Header 2 | - | -------- | -------- | - | Thing 1 | Thing 2 | - | Thing 3 | Thing 4 | -```` diff --git a/content/contribute/components/tables.md b/content/contribute/components/tables.md deleted file mode 100644 index 1631bafb2bb..00000000000 --- a/content/contribute/components/tables.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -description: components and formatting examples used in Docker's docs -title: Tables -toc_max: 3 ---- - -## Example - -### Basic table - -| Permission level | Access | -| :----------------------------------------------------------------------- | :------------------------------------------------------------ | -| **Bold** or _italic_ within a table cell. Next cell is empty on purpose. | | -| | Previous cell is empty. A `--flag` in mono text. | -| Read | Pull | -| Read/Write | Pull, push | -| Admin | All of the above, plus update description, create, and delete | - -### Feature-support table - -| Platform | x86_64 / amd64 | -| :--------- | :------------: | -| Ubuntu | ✅ | -| Debian | ✅ | -| Fedora | | -| Arch (btw) | ✅ | - -## Markdown - -### Basic table - -```md -| Permission level | Access | -| :----------------------------------------------------------------------- | :------------------------------------------------------------ | -| **Bold** or _italic_ within a table cell. Next cell is empty on purpose. | | -| | Previous cell is empty. A `--flag` in mono text. | -| Read | Pull | -| Read/Write | Pull, push | -| Admin | All of the above, plus update description, create, and delete | -``` - -The alignment of the cells in the source doesn't really matter. The ending pipe -character is optional (unless the last cell is supposed to be empty). - -### Feature-support table - -```md -| Platform | x86_64 / amd64 | -| :--------- | :------------: | -| Ubuntu | ✅ | -| Debian | ✅ | -| Fedora | | -| Arch (btw) | ✅ | -``` diff --git a/content/contribute/components/tabs.md b/content/contribute/components/tabs.md deleted file mode 100644 index ceca66e690e..00000000000 --- a/content/contribute/components/tabs.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -description: components and formatting examples used in Docker's docs -title: Tabs -toc_max: 3 ---- - -The tabs component consists of two shortcodes: - -- `{{}}` -- `{{}}` - -The `{{}}` shortcode is a parent, component, wrapping a number of `tabs`. -Each `{{}}` is given a name using the `name` attribute. - -You can optionally specify a `group` attribute for the `tabs` wrapper to indicate -that a tab section should belong to a group of tabs. See [Groups](#groups). 
- -## Example - -{{< tabs >}} -{{< tab name="JavaScript">}} - -```js -console.log("hello world") -``` - -{{< /tab >}} -{{< tab name="Go">}} - -```go -fmt.Println("hello world") -``` - -{{< /tab >}} -{{< /tabs >}} - -## Markup - -````markdown -{{}} -{{}} - -```js -console.log("hello world") -``` - -{{}} -{{}} - -```go -fmt.Println("hello world") -``` - -{{}} -{{}} -```` - -## Groups - -You can optionally specify a tab group on the `tabs` shortcode. -Doing so will synchronize the tab selection for all of the tabs that belong to the same group. - -### Tab group example - -The following example shows two tab sections belonging to the same group. - -{{< tabs group="code" >}} -{{< tab name="JavaScript">}} - -```js -console.log("hello world") -``` - -{{< /tab >}} -{{< tab name="Go">}} - -```go -fmt.Println("hello world") -``` - -{{< /tab >}} -{{< /tabs >}} - -{{< tabs group="code" >}} -{{< tab name="JavaScript">}} - -```js -const res = await fetch("/users/1") -``` - -{{< /tab >}} -{{< tab name="Go">}} - -```go -resp, err := http.Get("/users/1") -``` - -{{< /tab >}} -{{< /tabs >}} diff --git a/content/contribute/components/videos.md b/content/contribute/components/videos.md deleted file mode 100644 index 3e2f1bcec55..00000000000 --- a/content/contribute/components/videos.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -description: Learn about guidelines and best practices for videos in docs, and how to add a video component. -title: Videos -toc_max: 3 ---- - -## Video guidelines - -Videos are used rarely in Docker's documentation. When used, video should complement written text and not be the sole format of documentation. Videos can take longer to produce and be more difficult to maintain than written text or even screenshots, so consider the following before adding video: - -- Can you demonstrate a clear customer need for using video? -- Does the video offer new content, rather than directly reading or re-purposing official documentation? -- If the video contains user interfaces that may change regularly, do you have a maintenance plan to keep the video fresh? -- Does the [voice and tone](../style/voice-tone.md) of the video align with the rest of the documentation? -- Have you considered other options, such as screenshots or clarifying existing documentation? -- Is the quality of the video similar to the rest of Docker's documentation? -- Can the video be linked or embedded from the site? - -If all of the above criteria are met, you can reference the following best practices before creating a video to add to Docker documentation. - -### Best practices - -- Determine the audience for your video. Will the video be a broad overview for beginners, or will it be a deep dive into a technical process designed for an advanced user? -- Videos should be less than 5 minutes long. Keep in mind how long the video needs to be to properly explain the topic, and if the video needs to be longer than 5 minutes, consider text, diagrams, or screenshots instead. These are easier for a user to scan for relevant information. -- Videos should adhere to the same standards for accessibility as the rest of the documentation. -- Ensure the quality of your video by writing a script (if there's narration), making sure multiple browsers and URLs aren't visible, blurring or cropping out any sensitive information, and using smooth transitions between different browsers or screens. - -Videos are not hosted in the Docker documentation repository. 
To add a video, you can use a [link](./links.md) to hosted content, or embed using an [iframe](#iframe). - - -## iframe - -To embed a video on a docs page, use an ` -``` - -## asciinema - -`asciinema` is a command line tool for recording terminal sessions. The -recordings can be embedded on the documentation site. These are similar to -`console` code blocks, but since they're playable and scrubbable videos, they -add another level of usefulness over static codeblocks in some cases. Text in -an `asciinema` "video" can also be copied, which makes them more useful. - -Consider using an `asciinema` recording if: - -- The input/output of the terminal commands are too long for a static example - (you could also consider truncating the output) -- The steps you want to show are easily demonstrated in a few commands -- Where the it's useful to see both inputs and outputs of commands - -To create an `asciinema` recording and add it to docs: - -1. [Install](https://docs.asciinema.org/getting-started/) the `asciinema` CLI -2. Run `asciinema auth` to configure your client and create an account -3. Start a new recording with `asciinema rec` -4. Run the commands for your demo and stop the recording with `` or `exit` -5. Upload the recording to -6. Embed the player with a ` + +Pass a prompt directly as an argument: + +```console +$ docker ai "list my running containers" +``` + +Exit the TUI with `/exit` or Ctrl+C. + +## Working directory + +The working directory sets the default context for Gordon's file operations. + +Gordon uses your current shell directory as the working directory: + +```console +$ cd ~/my-project +$ docker ai +``` + +Override with `-C` or `--working-dir`: + +```console +$ docker ai -C ~/different-project +``` + +## Disabling Gordon + +Gordon CLI is part of Docker Desktop. To disable it, disable Gordon in Docker +Desktop Settings: + +1. Open Docker Desktop Settings. +2. Navigate to the **Beta features** section. +3. Clear the **Enable Gordon** option. +4. Select **Apply**. + +## Commands + +The `docker ai` command includes several subcommands: + +Interactive mode (default): + +```console +$ docker ai +``` + +Opens the TUI for conversational interaction. + +Version: + +```console +$ docker ai version +``` + +Displays the Gordon version. + +Feedback: + +```console +$ docker ai feedback +``` + +Opens a feedback form in your browser. diff --git a/content/manuals/ai/gordon/how-to/configure-tools.md b/content/manuals/ai/gordon/how-to/configure-tools.md new file mode 100644 index 00000000000..36c715724d7 --- /dev/null +++ b/content/manuals/ai/gordon/how-to/configure-tools.md @@ -0,0 +1,81 @@ +--- +title: Configure Gordon's tools +linkTitle: Configure tools +description: Enable and disable Gordon's built-in tools based on your needs +weight: 40 +--- + +{{< summary-bar feature_name="Gordon" >}} + +Gordon includes built-in tools that extend its capabilities. You can configure +which tools Gordon has access to based on your security requirements and +workflow needs. + +Tool configuration provides an additional layer of control: + +- Enabled tools: Gordon can propose actions using these tools (subject to + your approval) +- Disabled tools: Gordon cannot use these tools, and will not request + permission to use them + +## Accessing tool settings + +To configure Gordon's tools: + +1. Open Docker Desktop. +2. Select **Gordon** in the sidebar. +3. Select the settings icon at the bottom of the text input area. 
+ + ![Session settings icon](../images/perm_settings.avif?border=true) + +The tool settings dialog opens with two tabs: **Basic** and **Advanced**. + +## Basic tool settings + +In the **Basic** tab, you can enable or disable individual tools globally. + +To disable a tool: + +1. Find the tool you want to disable in the list. +2. Toggle it off. +3. Select **Save**. + +Disabled tools cannot be used by Gordon, even with your approval. + +## Advanced tool settings + +The **Advanced** tab lets you create fine-grained allow-lists and deny-lists +for specific commands or patterns. + +Allow-lists: +Gordon can use allow-listed commands even when the main tool is disabled. For +example, disable the shell tool but allow `cat`, `grep`, and `ls`. + +Deny-lists: +Block specific commands while keeping the tool enabled. For example, allow the +shell tool but deny `chown` and `chmod`. + +To configure: + +1. Switch to the **Advanced** tab. +2. Add commands to **Allow rules** or **Deny rules**. +3. Select **Save**. + +![Advanced tool configuration](../images/gordon_advanced_tool_config.avif?w=500px&border=true) + +Gordon still requests approval before running allow-listed tools, unless YOLO +mode (auto-approve mode that bypasses permission checks) is enabled. + +## Organizational controls + +For Business subscriptions, administrators can control tool access for the +entire organization using Settings Management. + +Administrators can: + +- Disable specific tools for all users +- Lock tool configuration to prevent users from changing it +- Set organization-wide tool policies + +See [Settings Management](/enterprise/security/hardened-desktop/settings-management/) +for details. diff --git a/content/manuals/ai/gordon/how-to/docker-desktop.md b/content/manuals/ai/gordon/how-to/docker-desktop.md new file mode 100644 index 00000000000..f8ad9a960eb --- /dev/null +++ b/content/manuals/ai/gordon/how-to/docker-desktop.md @@ -0,0 +1,46 @@ +--- +title: Using Gordon in Docker Desktop +linkTitle: Docker Desktop +description: Access and use Gordon through the Docker Desktop graphical interface +weight: 10 +--- + +{{< summary-bar feature_name="Gordon" >}} + +Gordon is integrated into Docker Desktop. Access it from the sidebar to open +the Gordon view. + +## Basic usage + +To access Gordon: + +1. Open Docker Desktop and sign in to your Docker account. +2. Select **Gordon** in the sidebar. +3. Type your question or request in the input field. +4. Press Enter or select the send button. + +Gordon responds in the chat view and maintains context throughout the session. + +## Working directory + +The working directory sets the default context for Gordon's file operations. +Select your working directory when you start Gordon or use the directory icon +to change it during a conversation: + +1. Select the directory icon in the Gordon input area. +2. Browse and select a different directory. + +## Disabling Gordon + +To disable Gordon: + +1. Open Docker Desktop Settings. +2. Navigate to the **Beta features** section. +3. Clear the **Enable Gordon** option. +4. Select **Apply**. + +## Configure tools + +You can control which tools Gordon has access to. See [Configure +tools](./configure-tools.md) for details on enabling, disabling, and +fine-tuning tool permissions. 
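To make the allow- and deny-rule behaviour from the tool configuration page above more concrete, here is a hedged console sketch. The prompts, the rule set, and Gordon's exact wording are illustrative assumptions rather than captured output; only the `Approve? [Y/n/a]` prompt shape is reused from the permission examples in these docs, and allow-listed commands still ask for approval unless YOLO mode is enabled.

```console
# Assumed rules (illustrative): shell tool disabled;
# allow rules: cat, grep, ls; deny rules: chmod, chown

$ docker ai "what's in my project directory?"

Gordon proposes:
  ls -la

Approve? [Y/n/a]: y

$ docker ai "make deploy.sh executable"
# chmod is deny-listed, so Gordon can't propose it and would have to
# suggest another approach or leave the command for you to run yourself.
```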
diff --git a/content/manuals/ai/gordon/how-to/permissions.md b/content/manuals/ai/gordon/how-to/permissions.md new file mode 100644 index 00000000000..006404f856f --- /dev/null +++ b/content/manuals/ai/gordon/how-to/permissions.md @@ -0,0 +1,125 @@ +--- +title: Gordon's permission model +linkTitle: Permissions +description: How Gordon's ask-first approach keeps you in control +weight: 30 +--- + +{{< summary-bar feature_name="Gordon" >}} + +Before Gordon uses a tool or action that can modify your system, it proposes +the action and waits for your approval before executing. + +## What requires approval + +By default, the following actions require approval before Gordon can use them: + +- Commands executed in your shell +- Writing or changing files +- Fetching information from the internet + +## What doesn't require approval + +- Reading files, listing directories (even outside Gordon's working directory) +- Searching the Docker documentation +- Analyzing code or explaining errors + +## Configuring permission settings + +To change the default permission settings for Gordon: + +1. Open Docker Desktop. +2. Select **Gordon** in the sidebar. +3. Select the settings icon at the bottom of text input. + + ![Session settings icon](../images/perm_settings.avif) + +In the **Basic** tab you can configure whether Gordon should ask for permission +before using a tool. + +You can also enable YOLO mode to bypass permission checking altogether. + +The new permission settings apply immediately to all sessions. + +## Session-level permissions + +When you choose "Approve for this session" (Desktop) or "A" (CLI), Gordon can +use that specific tool without asking again during the current conversation. + +Example: + +```console +$ docker ai "check my containers and clean up stopped ones" + +Gordon proposes: + docker ps -a + +Approve? [Y/n/a]: a + +[Gordon executes docker ps -a] + +Gordon proposes: + docker container prune -f + +[Executes automatically - you approved shell access for this session] +``` + +Session permissions reset when: + +- You close the Gordon view (Desktop) +- You exit `docker ai` (CLI) +- You start a new conversation + +## Security considerations + +Working directory +: The working directory sets the default context for file operations. It does + not constrain Gordon's access to files or directories; Gordon can read files + outside this directory. + +Verify before approving +: Gordon can make mistakes. Before approving: + + - Confirm commands match your intent + - Check container names and image tags are correct + - Verify volume mounts and port mappings + - Review file changes for important logic + + If you don't understand an operation, ask Gordon to explain it or reject and + request a different approach. + +Destructive operations +: Gordon warns about destructive operations but won't prevent them. Operations + like `docker container rm`, `docker system prune`, and `docker volume rm` can + cause permanent data loss. Back up important data first. + +## Stopping and reverting + +Stop Gordon during execution by pressing `Ctrl+C` (CLI) or selecting **Cancel** +(Desktop). + +Revert Gordon's actions using Docker commands or version control: + +- Restore files from Git +- Restart stopped containers +- Rebuild images +- Recreate volumes from backups + +Use version control for all files in your working directory. + +## Organizational controls + +Administrators can control Gordon's capabilities at the organization level +using Settings Management. 
+ +Available controls: + +- Disable Gordon entirely +- Restrict tool capabilities +- Set working directory boundaries + +For Business subscriptions, Gordon must be enabled by an administrator before +users can access it. + +See [Settings Management](/enterprise/security/hardened-desktop/settings-management/) +for details. diff --git a/content/manuals/ai/gordon/images/gordon.webp b/content/manuals/ai/gordon/images/gordon.webp deleted file mode 100644 index ebf97291489..00000000000 Binary files a/content/manuals/ai/gordon/images/gordon.webp and /dev/null differ diff --git a/content/manuals/ai/gordon/images/gordon_advanced_tool_config.avif b/content/manuals/ai/gordon/images/gordon_advanced_tool_config.avif new file mode 100644 index 00000000000..8e1e5508a77 Binary files /dev/null and b/content/manuals/ai/gordon/images/gordon_advanced_tool_config.avif differ diff --git a/content/manuals/ai/gordon/images/gordon_gui.avif b/content/manuals/ai/gordon/images/gordon_gui.avif new file mode 100644 index 00000000000..902e0df6fac Binary files /dev/null and b/content/manuals/ai/gordon/images/gordon_gui.avif differ diff --git a/content/manuals/ai/gordon/images/gordon_tui.avif b/content/manuals/ai/gordon/images/gordon_tui.avif new file mode 100644 index 00000000000..c265d5ba0cf Binary files /dev/null and b/content/manuals/ai/gordon/images/gordon_tui.avif differ diff --git a/content/manuals/ai/gordon/images/perm_settings.avif b/content/manuals/ai/gordon/images/perm_settings.avif new file mode 100644 index 00000000000..96d61d171dc Binary files /dev/null and b/content/manuals/ai/gordon/images/perm_settings.avif differ diff --git a/content/manuals/ai/gordon/images/permissions.avif b/content/manuals/ai/gordon/images/permissions.avif new file mode 100644 index 00000000000..d992f84a027 Binary files /dev/null and b/content/manuals/ai/gordon/images/permissions.avif differ diff --git a/content/manuals/ai/gordon/images/toolbox.webp b/content/manuals/ai/gordon/images/toolbox.webp deleted file mode 100644 index ae8fcf00680..00000000000 Binary files a/content/manuals/ai/gordon/images/toolbox.webp and /dev/null differ diff --git a/content/manuals/ai/gordon/mcp/_index.md b/content/manuals/ai/gordon/mcp/_index.md deleted file mode 100644 index af49c24ed45..00000000000 --- a/content/manuals/ai/gordon/mcp/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: MCP -description: Learn how to use MCP servers with Gordon -keywords: ai, mcp, gordon, docker desktop, docker, llm, -grid: -- title: Built-in tools - description: Use the built-in tools. - icon: construction - link: /ai/gordon/mcp/built-in-tools -- title: MCP configuration - description: Configure MCP tools on a per-project basis. - icon: manufacturing - link: /ai/gordon/mcp/yaml -- title: MCP Server - description: Use Gordon as an MCP server - icon: dns - link: /ai/gordon/mcp/gordon-mcp-server/ -aliases: - - /desktop/features/gordon/mcp/ ---- - -## What is MCP? - -[Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP) is -an open protocol that standardizes how applications provide context and extra -functionality to large language models. MCP functions as a client-server -protocol, where the client, for example an application like Gordon, sends -requests, and the server processes those requests to deliver the necessary -context to the AI. This context may be gathered by the MCP server by executing -some code to perform an action and getting the result of the action, calling -external APIs, etc. 
- -Gordon, along with other MCP clients like Claude Desktop or Cursor, can interact -with MCP servers running as containers. - -{{< grid >}} \ No newline at end of file diff --git a/content/manuals/ai/gordon/mcp/built-in-tools.md b/content/manuals/ai/gordon/mcp/built-in-tools.md deleted file mode 100644 index 9fd76880ac1..00000000000 --- a/content/manuals/ai/gordon/mcp/built-in-tools.md +++ /dev/null @@ -1,238 +0,0 @@ ---- -title: Built-in tools -description: How to use Gordon's built-in tools -keywords: ai, mcp, gordon -aliases: - - /desktop/features/gordon/mcp/built-in-tools/ ---- - -Gordon comes with an integrated toolbox providing access to various system tools -and capabilities. These tools extend Gordon's functionality by allowing it to -interact with the Docker Engine, Kubernetes, Docker Scout's security scanning, -and other developer utilities. This documentation covers the available tools, -their configuration, and usage patterns. - -## Configuration - -Tools can be configured globally in the toolbox, making them accessible -throughout the Gordon interfaces, including both Docker Desktop and the CLI. - -To configure: - -1. On the **Ask Gordon** view in Docker Desktop, select the `Toolbox` button in the bottom left of the input area. - - ![Gordon page with the toolbox button](../images/gordon.webp) - -2. Choose the tools you want to make available. Selecting a card lets you view extra information regarding each tool and what it does. - - ![Gordon's Toolbox](../images/toolbox.webp) - - For more information on the possible tools, see [Reference](#reference). - -## Usage examples - -This section provides task-oriented examples for common operations with Gordon -tools. - -### Managing Docker containers - -#### List and monitor containers - -```console -# List all running containers -$ docker ai "Show me all running containers" - -# List containers using specific resources -$ docker ai "List all containers using more than 1GB of memory" - -# View logs from a specific container -$ docker ai "Show me logs from my running api-container from the last hour" -``` - -#### Manage container lifecycle - -```console -# Run a new container -$ docker ai "Run a nginx container with port 80 exposed to localhost" - -# Stop a specific container -$ docker ai "Stop my database container" - -# Clean up unused containers -$ docker ai "Remove all stopped containers" -``` - -### Working with Docker images - -```console -# List available images -$ docker ai "Show me all my local Docker images" - -# Pull a specific image -$ docker ai "Pull the latest Ubuntu image" - -# Build an image from a Dockerfile -$ docker ai "Build an image from my current directory and tag it as myapp:latest" - -# Clean up unused images -$ docker ai "Remove all my unused images" -``` - -### Managing Docker volumes - -```console -# List volumes -$ docker ai "List all my Docker volumes" - -# Create a new volume -$ docker ai "Create a new volume called postgres-data" - -# Backup data from a container to a volume -$ docker ai "Create a backup of my postgres container data to a new volume" -``` - -### Kubernetes operations - -```console -# Create a deployment -$ docker ai "Create an nginx deployment and make sure it's exposed locally" - -# List resources -$ docker ai "Show me all deployments in the default namespace" - -# Get logs -$ docker ai "Show me logs from the auth-service pod" -``` - -### Security analysis - - -```console -# Scan for CVEs -$ docker ai "Scan my application for security vulnerabilities" - -# Get security recommendations 
-$ docker ai "Give me recommendations for improving the security of my nodejs-app image" -``` - -### Development workflows - -```console -# Analyze and commit changes -$ docker ai "Look at my local changes, create multiple commits with sensible commit messages" - -# Review branch status -$ docker ai "Show me the status of my current branch compared to main" -``` - -## Reference - -This section provides a comprehensive listing of the built-in tools you can find -in Gordon's toolbox. - -### Docker tools - -Tools to interact with your Docker containers, images, and volumes. - -#### Container management - -| Name | Description | -|------|-------------| -| `list_containers` | List all Docker containers | -| `remove_containers` | Remove one or more Docker containers | -| `stop_container` | Stop a running Docker container | -| `fetch_container_logs` | Retrieve logs from a Docker container | -| `run_container` | Run a new Docker container | - -#### Volume management - -| Tool | Description | -|------|-------------| -| `list_volumes` | List all Docker volumes | -| `remove_volume` | Remove a Docker volume | -| `create_volume` | Create a new Docker volume | - -#### Image management - -| Tool | Description | -|------|-------------| -| `list_images` | List all Docker images | -| `remove_images` | Remove Docker images | -| `pull_image` | Pull an image from a registry | -| `push_image` | Push an image to a registry | -| `build_image` | Build a Docker image | -| `tag_image` | Tag a Docker image | -| `inspect` | Inspect a Docker object | - -### Kubernetes tools - -Tools to interact with your Kubernetes cluster - -#### Pods - -| Tool | Description | -|------|-------------| -| `list_pods` | List all pods in the cluster | -| `get_pod_logs` | Get logs from a specific pod | - -#### Deployment management - - -| Tool | Description | -|------|-------------| -| `list_deployments` | List all deployments | -| `create_deployment` | Create a new deployment | -| `expose_deployment` | Expose a deployment as a service | -| `remove_deployment` | Remove a deployment | - -#### Service management - -| Tool | Description | -|------|-------------| -| `list_services` | List all services | -| `remove_service` | Remove a service | - -#### Cluster information - -| Tool | Description | -|------|-------------| -| `list_namespaces` | List all namespaces | -| `list_nodes` | List all nodes in the cluster | - -### Docker Scout tools - -Security analysis tools powered by Docker Scout. - -| Tool | Description | -|------|-------------| -| `search_for_cves` | Analyze a Docker image, a project directory, or other artifacts for vulnerabilities using Docker Scout CVEs.search for cves | -| `get_security_recommendations` | Analyze a Docker image, a project directory, or other artifacts for base image update recommendations using Docker Scout. | - -### Developer tools - -General-purpose development utilities. 
- -| Tool | Description | -|------|-------------| -| `fetch` | Retrieve content from a URL | -| `get_command_help` | Get help for CLI commands | -| `run_command` | Execute shell commands | -| `filesystem` | Perform filesystem operations | -| `git` | Execute git commands | - -### AI model tools - -| Tool | Description | -|------|-------------| -| `list_models` | List all available AI models | -| `pull_model` | Download an AI model | -| `run_model` | Query a model with a prompt | -| `remove_model` | Remove an AI model | - -### AI Tool Catalog - -When the [AI Tool -Catalog](https://open.docker.com/extensions/marketplace?extensionId=docker/labs-ai-tools-for-devs) -Docker Desktop extension is installed, all the tools enabled in the catalog are -available for Gordon to use. After installation, you can enable the usage of the -AI Tool Catalog tools in the toolbox section of Gordon. diff --git a/content/manuals/ai/gordon/mcp/gordon-mcp-server.md b/content/manuals/ai/gordon/mcp/gordon-mcp-server.md deleted file mode 100644 index 39a163ca87d..00000000000 --- a/content/manuals/ai/gordon/mcp/gordon-mcp-server.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Gordon as an MCP server -description: How to use Gordon as an MCP server -keywords: ai, mcp, gordon -aliases: - - /desktop/features/gordon/mcp/gordon-mcp-server/ ---- - -## Gordon as an MCP server - -In addition to functioning as an MCP client, Gordon can also act as an MCP -server. This means that all the tools configured in the toolbox section of -Gordon can be exposed to another MCP client like Claude Desktop, Cursor and -others. - -To use Gordon’s built-in tools in other MCP clients, configure your client of -choice to use the `docker ai mcpserver` command. This allows Gordon to serve its -built-in tools via the MCP protocol for various clients. - -For example, to enable Gordon’s tools in Claude Desktop, add the following -configuration to the Claude configuration file: - -```json -{ - "mcpServers": { - "gordon": { - "command": "docker", - "args": ["ai", "mcpserver"] - } - } -} -``` - -This setup ensures that Claude Desktop can communicate with Gordon as an MCP -server, leveraging its built-in tools. You can follow the [Claude Desktop -documentation](https://modelcontextprotocol.io/quickstart/user) to explore -further. - -### Tool permissions and security - -These tools operate with the same permissions as the user running the -application. - -Any potentially destructive tool call, changing files, deleting images or -stopping containers will ask for your confirmation before proceeding. - -![Gordon page with the delete confirmation question](../images/delete.webp) diff --git a/content/manuals/ai/gordon/mcp/yaml.md b/content/manuals/ai/gordon/mcp/yaml.md deleted file mode 100644 index 326c5d6071a..00000000000 --- a/content/manuals/ai/gordon/mcp/yaml.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: YAML configuration -description: Learn how to use MCP servers with Gordon -keywords: ai, mcp, gordon -aliases: - - /desktop/features/gordon/mcp/yaml/ ---- - -Docker has partnered with Anthropic to build container images for the [reference -implementations](https://github.com/modelcontextprotocol/servers/) of MCP -servers available on Docker Hub under [the mcp -namespace](https://hub.docker.com/u/mcp). - -When you run the `docker ai` command in your terminal to ask a question, Gordon -looks in the `gordon-mcp.yml` file in your working directory (if present) for a -list of MCP servers that should be used when in that context. 
The -`gordon-mcp.yml` file is a Docker Compose file that configures MCP servers as -Compose services for Gordon to access. - -The following minimal example shows how you can use the [mcp-time -server](https://hub.docker.com/r/mcp/time) to provide temporal capabilities to -Gordon. For more information, you can check out the [source code and -documentation](https://github.com/modelcontextprotocol/servers/tree/main/src/time). - -Create the `gordon-mcp.yml` file in your working directory and add the time - server: - -```yaml -services: - time: - image: mcp/time -``` - -With this file present, you can now ask Gordon to tell you the time in - another timezone: - - ```bash - $ docker ai 'what time is it now in kiribati?' - - • Calling get_current_time - - The current time in Kiribati (Tarawa) is 9:38 PM on January 7, 2025. - - ``` - -As you can see, Gordon found the MCP time server and called its tool when -needed. - -## Advanced usage - -Some MCP servers need access to your filesystem or system environment variables. -Docker Compose can help with this. Since `gordon-mcp.yml` is a Compose file you -can add bind mounts using the regular Docker Compose syntax, which makes your -filesystem resources available to the container: - -```yaml -services: - fs: - image: mcp/filesystem - command: - - /rootfs - volumes: - - .:/rootfs -``` - -The `gordon-mcp.yml` file adds filesystem access capabilities to Gordon and -since everything runs inside a container Gordon only has access to the -directories you specify. - -Gordon can handle any number of MCP servers. For example, if you give Gordon -access to the internet with the `mcp/fetch` server: - -```yaml -services: - fetch: - image: mcp/fetch - fs: - image: mcp/filesystem - command: - - /rootfs - volumes: - - .:/rootfs -``` - -You can now ask things like: - -```bash -$ docker ai can you fetch rumpl.dev and write the summary to a file test.txt - - • Calling fetch ✔️ - • Calling write_file ✔️ - - The summary of the website rumpl.dev has been successfully written to the file test.txt in the allowed directory. Let me know if you need further assistance! - - -$ cat test.txt -The website rumpl.dev features a variety of blog posts and articles authored by the site owner. Here's a summary of the content: - -1. **Wasmio 2023 (March 25, 2023)**: A recap of the WasmIO 2023 conference held in Barcelona. The author shares their experience as a speaker and praises the organizers for a successful event. - -2. **Writing a Window Manager in Rust - Part 2 (January 3, 2023)**: The second part of a series on creating a window manager in Rust. This installment focuses on enhancing the functionality to manage windows effectively. - -3. **2022 in Review (December 29, 2022)**: A personal and professional recap of the year 2022. The author reflects on the highs and lows of the year, emphasizing professional achievements. - -4. **Writing a Window Manager in Rust - Part 1 (December 28, 2022)**: The first part of the series on building a window manager in Rust. The author discusses setting up a Linux machine and the challenges of working with X11 and Rust. - -5. **Add docker/docker to your dependencies (May 10, 2020)**: A guide for Go developers on how to use the Docker client library in their projects. The post includes a code snippet demonstrating the integration. - -6. **First (October 11, 2019)**: The inaugural post on the blog, featuring a simple "Hello World" program in Go. -``` - -## What’s next? 
- -Now that you’ve learned how to use MCP servers with Gordon, here are a few ways -you can get started: - -- Experiment: Try integrating one or more of the tested MCP servers into your - `gordon-mcp.yml` file and explore their capabilities. -- Explore the ecosystem: Check out the [reference implementations on - GitHub](https://github.com/modelcontextprotocol/servers/) or browse the - [Docker Hub MCP namespace](https://hub.docker.com/u/mcp) for additional - servers that might suit your needs. -- Build your own: If none of the existing servers meet your needs, or you’re - curious about exploring how they work in more detail, consider developing a - custom MCP server. Use the [MCP - specification](https://www.anthropic.com/news/model-context-protocol) as a - guide. -- Share your feedback: If you discover new servers that work well with Gordon - or encounter issues with existing ones, [share your findings to help improve - the ecosystem](https://docker.qualtrics.com/jfe/form/SV_9tT3kdgXfAa6cWa). - -With MCP support, Gordon offers powerful extensibility and flexibility to meet -your specific use cases whether you’re adding temporal awareness, file -management, or internet access. - -### Compatible MCP servers - -These are MCP servers that have been tested with Gordon and are known to be -working: - -- `mcp/time` -- `mcp/fetch` -- `mcp/filesystem` -- `mcp/postgres` -- `mcp/git` -- `mcp/sqlite` -- `mcp/github` - -### Untested (should work with appropriate API tokens) - -These are MCP servers that were not tested but should work if given the -appropriate API tokens: - -- `mcp/brave-search` -- `mcp/gdrive` -- `mcp/slack` -- `mcp/google-maps` -- `mcp/gitlab` -- `mcp/everything` -- `mcp/aws-kb-retrieval-server` -- `mcp/sentry` - -### Unsupported - -These are MCP servers that are currently known to be unsupported: - -- `mcp/sequentialthinking` - (The tool description is too long) -- `mcp/puppeteer` - Puppeteer sends back images and Gordon doesn’t know how to - handle them, it only handles text responses from tools -- `mcp/everart` - Everart sends back images and Gordon doesn’t know how to - handle them, it only handles text responses from tools -- `mcp/memory` - There is no way to configure the server to use a custom path - for its knowledge base diff --git a/content/manuals/ai/gordon/use-cases.md b/content/manuals/ai/gordon/use-cases.md new file mode 100644 index 00000000000..1eef4d2e0a7 --- /dev/null +++ b/content/manuals/ai/gordon/use-cases.md @@ -0,0 +1,129 @@ +--- +title: Gordon use cases and examples +linkTitle: Use cases +description: Example prompts for common Docker workflows +weight: 10 +--- + +{{< summary-bar feature_name="Gordon" >}} + +Gordon handles Docker workflows through natural conversation. This page shows +example prompts for the most common use cases. + +## Debug and troubleshoot + +Fix broken containers, diagnose build failures, and resolve issues. + +```console +# Diagnose container crashes +$ docker ai "why did my postgres container crash?" + +# Debug build failures +$ docker ai "my build is failing at the pip install step, what's wrong?" + +# Fix networking issues +$ docker ai "my web container can't reach my database container" + +# Investigate performance problems +$ docker ai "my container is using too much memory, help me investigate" +``` + +## Build and containerize + +Create Docker assets for applications and migrate to hardened images. 
+ +```console +# Create Dockerfile from scratch +$ docker ai "create a Dockerfile for my Node.js application" + +# Generate compose file +$ docker ai "create a docker-compose.yml for my application stack" + +# Migrate to Docker Hardened Images +$ docker ai "migrate my Dockerfile to use Docker Hardened Images" +``` + +## Execute operations + +Run Docker commands to manage containers, images, and resources. + +```console +# Start containers with configuration +$ docker ai "run a redis container with persistence" + +# Build and tag images +$ docker ai "build my Dockerfile and tag it for production" + +# Clean up resources +$ docker ai "clean up all unused Docker resources" +``` + +## Develop and optimize + +Improve Dockerfiles and configure secure, efficient development environments. + +```console +# Optimize existing Dockerfile +$ docker ai "rate my Dockerfile and suggest improvements" + +# Add security improvements +$ docker ai "make my Dockerfile more secure" + +# Configure development workflow +$ docker ai "set up my container for development with hot reload" +``` + +## Manage resources + +Inspect containers, images, and resource usage. + +```console +# Check container status +$ docker ai "show me all my containers and their status" + +# Analyze disk usage +$ docker ai "how much disk space is Docker using?" + +# Review image details +$ docker ai "list my images sorted by size" +``` + +## Learn Docker + +Understand concepts and commands in the context of your projects. + +```console +# Explain Docker concepts +$ docker ai "explain how Docker networking works" + +# Understand commands +$ docker ai "what's the difference between COPY and ADD in Dockerfile?" + +# Get troubleshooting guidance +$ docker ai "how do I debug a container that exits immediately?" +``` + + +## Writing effective prompts + +Be specific: +- Include relevant context: "my postgres container" not "the database" +- State your goal: "make my build faster" not "optimize" +- Include error messages when debugging + +Gordon works best when you describe what you want to achieve rather than how to +do it. + +### Working directory context + +When using `docker ai` in the CLI, Gordon uses your current working directory +as the default context for file operations. Change to your project directory +before starting Gordon to ensure it has access to the right files: + +```console +$ cd ~/my-project +$ docker ai "review my Dockerfile" +``` + +You can also override the working directory with the `-C` flag. See [Using +Gordon via CLI](./how-to/cli.md#working-directory) for details. 
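As a small, hedged illustration of the "be specific" guidance above (the container name, error message, and symptoms are invented for the example):

```console
# Vague: Gordon has to guess which container you mean and what "fix" covers
$ docker ai "fix my database"

# Specific: names the container, quotes the symptom, and states the goal
$ docker ai "my postgres-15 container exits right after start with 'data directory has wrong ownership'; help me fix the volume permissions"
```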
diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/_index.md b/content/manuals/ai/mcp-catalog-and-toolkit/_index.md index ef550dacb32..891b35f08a7 100644 --- a/content/manuals/ai/mcp-catalog-and-toolkit/_index.md +++ b/content/manuals/ai/mcp-catalog-and-toolkit/_index.md @@ -3,41 +3,94 @@ title: Docker MCP Catalog and Toolkit linkTitle: MCP Catalog and Toolkit params: sidebar: - group: AI + group: AI and agents badge: - color: green - text: New -weight: 30 -description: Learn about Docker's MCP catalog on Docker Hub and how to use it with the MCP Toolkit extension + color: blue + text: Beta +weight: 20 +description: Learn about Docker's MCP catalog on Docker Hub keywords: Docker, ai, mcp servers, ai agents, extension, docker desktop, llm, docker hub grid: - - title: MCP Catalog - description: Learn about the benefits of the MCP Catalog, how you can use it, and how you can contribute - icon: hub - link: /ai/mcp-catalog-and-toolkit/catalog/ - - title: MCP Toolkit - description: Learn about how to use the MCP Toolkit extension on Docker Desktop - icon: manufacturing - link: /ai/mcp-catalog-and-toolkit/toolkit/ + - title: Get started with MCP Toolkit + description: Learn how to quickly install and use the MCP Toolkit to set up servers and clients. + icon: explore + link: /ai/mcp-catalog-and-toolkit/get-started/ + - title: MCP Catalog + description: Browse Docker's curated collection of verified MCP servers + icon: hub + link: /ai/mcp-catalog-and-toolkit/catalog/ + - title: MCP Profiles + description: Organize servers into profiles for different projects and share configurations + icon: folder + link: /ai/mcp-catalog-and-toolkit/profiles/ + - title: MCP Toolkit + description: Use Docker Desktop's UI to discover, configure, and manage MCP servers + icon: /icons/toolkit.svg + link: /ai/mcp-catalog-and-toolkit/toolkit/ + - title: MCP Gateway + description: Use the CLI and Gateway to run MCP servers with custom configurations + icon: developer_board + link: /ai/mcp-catalog-and-toolkit/mcp-gateway/ + - title: Dynamic MCP + description: Discover and add MCP servers on-demand using natural language + icon: search + link: /ai/mcp-catalog-and-toolkit/dynamic-mcp/ + - title: Docker Hub MCP server + description: Use the Docker Hub MCP server to search images and manage repositories + icon: device_hub + link: /ai/mcp-catalog-and-toolkit/hub-mcp/ + - title: Security FAQs + description: Common questions about MCP security, credentials, and server verification + icon: security + link: /ai/mcp-catalog-and-toolkit/faqs/ + - title: E2B sandboxes + description: Cloud sandboxes for AI agents with built-in MCP Catalog access + icon: cloud + link: /ai/mcp-catalog-and-toolkit/e2b-sandboxes/ --- -The Model Context Protocol (MCP) is a modern standard that transforms AI agents from passive responders into action-oriented systems. By standardizing how tools are described, discovered, and invoked, MCP enables agents to securely query APIs, access data, and execute services across diverse environments. +{{< summary-bar feature_name="Docker MCP Catalog and Toolkit" >}} -As agents move into production, MCP solves common integration challenges — interoperability, reliability, and security — by providing a consistent, decoupled, and scalable interface between agents and tools. Just as containers redefined software deployment, MCP is reshaping how AI systems interact with the world. 
+[Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP) is +an open protocol that standardizes how AI applications access external tools +and data sources. By connecting LLMs to local development tools, databases, +APIs, and other resources, MCP extends their capabilities beyond their base +training. -## What is Docker MCP Catalog and Toolkit? +The challenge is that running MCP servers locally creates operational friction. +Each server requires separate installation and configuration for every +application you use. You run untrusted code directly on your machine, manage +updates manually, and troubleshoot dependency conflicts yourself. Configure a +GitHub server for Claude, then configure it again for Cursor, and so on. Each +time you manage credentials, permissions, and environment setup. -Docker MCP Catalog and Toolkit is a comprehensive solution for securely building, sharing, and running MCP tools. It simplifies the developer experience across four key areas: +## Docker MCP features -- Discovery: A central catalog with verified, versioned tools -- Credential Management: OAuth-based and secure by default -- Execution: Tools run in isolated, containerized environments -- Portability: Use MCP tools across Claude, Cursor, VS Code, and more — no code changes needed +The [MCP Toolkit](/ai/mcp-catalog-and-toolkit/toolkit/) and [MCP +Gateway](/ai/mcp-catalog-and-toolkit/mcp-gateway/) solve these challenges +through centralized management. Instead of configuring each server for every AI +application separately, you set things up once and connect all your clients to +it. The workflow centers on three concepts: catalogs, profiles, and clients. -With Docker Hub and the Docker Desktop extension, you can: +![MCP overview](./images/mcp_toolkit.avif) -- Launch MCP servers in seconds -- Add tools via CLI or GUI -- Rely on Docker’s pull-based infrastructure for trusted delivery +[Catalogs](/ai/mcp-catalog-and-toolkit/catalog/) are curated collections of +MCP servers. The Docker MCP Catalog provides 300+ verified servers packaged as +container images with versioning, provenance, and security updates. Organizations +can create [custom +catalogs](/ai/mcp-catalog-and-toolkit/catalog/#custom-catalogs) with approved +servers for their teams. + +[Profiles](/ai/mcp-catalog-and-toolkit/profiles/) organize servers into named +collections for different projects. Your "web-dev" profile might use GitHub and +Playwright; your "backend" profile, database tools. Profiles support both +containerized servers from catalogs and remote MCP servers. Configure a profile +once, then share it across clients or with your team. + +Clients are the AI applications that connect to your profiles. Claude Code, +Cursor, Zed, and others connect through the MCP Gateway, which routes requests +to the right server and handles authentication and lifecycle management. 
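For a rough sense of how a client plugs into this, here is a minimal sketch. It assumes the gateway is exposed through a `docker mcp gateway run` command; check the MCP Gateway page linked below for the exact invocation and any flags your client needs, since those details are not guaranteed by this overview.

```console
# Sketch: run the MCP Gateway in the foreground. MCP clients are normally
# configured to launch this same command; the gateway then routes their
# requests to the servers enabled in your active profile.
$ docker mcp gateway run
```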
+ +## Learn more {{< grid >}} diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/catalog.md b/content/manuals/ai/mcp-catalog-and-toolkit/catalog.md index 388f3925d70..284d4198b31 100644 --- a/content/manuals/ai/mcp-catalog-and-toolkit/catalog.md +++ b/content/manuals/ai/mcp-catalog-and-toolkit/catalog.md @@ -1,94 +1,139 @@ --- title: Docker MCP Catalog -description: Learn about the benefits of the MCP Catalog, how you can use it, and how you can contribute -keywords: docker hub, mcp, mcp servers, ai agents, calatog, docker +linkTitle: Catalog +description: Browse Docker's curated collection of verified MCP servers, and create custom catalogs for your team or organization. +keywords: docker hub, mcp, mcp servers, ai agents, catalog, custom catalog, docker +weight: 20 --- -The [Docker MCP Catalog](https://hub.docker.com/catalogs/mcp) is a centralized, trusted registry for discovering, sharing, and running MCP-compatible tools. Seamlessly integrated into Docker Hub, it offers verified, versioned, and curated MCP servers packaged as Docker images. +{{< summary-bar feature_name="Docker MCP Catalog" >}} -The catalog also solves common MCP server challenges: +The [Docker MCP Catalog](https://hub.docker.com/mcp) is a curated collection of +verified MCP servers, packaged as Docker images and distributed through Docker +Hub. It solves common challenges with running MCP servers locally: environment +conflicts, setup complexity, and security concerns. -- Environment conflicts: Tools often need specific runtimes that may clash with existing setups. -- Lack of isolation: Traditional setups risk exposing the host system. -- Setup complexity: Manual installation and configuration result in slow adoption. -- Inconsistency across platforms: Tools may behave unpredictably on different OSes. +The catalog serves as the source of available MCP servers. When you add servers +to your [profiles](/manuals/ai/mcp-catalog-and-toolkit/profiles.md), you select +them from the catalog. Each server runs as an isolated container, making it +portable and consistent across different environments. -With Docker, each MCP server runs as a self-contained container so it is portable, isolated, and consistent. You can launch tools instantly using Docker CLI or Docker Desktop, without worrying about dependencies or compatibility. +> [!NOTE] +> E2B sandboxes now include direct access to the Docker MCP Catalog, giving +> developers access to over 200 tools and services to seamlessly build and run +> AI agents. For more information, see [E2B Sandboxes](e2b-sandboxes.md). -## Key features +## What's in the catalog -- Over 100 verified MCP servers in one place -- Publisher verification and versioned releases -- Pull-based distribution using Docker’s infrastructure -- Tools provided by partners such as New Relic, Stripe, Grafana, and more +The Docker MCP Catalog includes: -## How it works +- Verified servers: All servers are versioned with full provenance and SBOM + metadata +- Partner tools: Servers from New Relic, Stripe, Grafana, and other trusted + partners +- Docker-built servers: Locally-running servers built and digitally signed by + Docker for enhanced security +- Remote services: Cloud-hosted servers that connect to external services like + GitHub, Notion, and Linear -Each tool in the MCP Catalog is packaged as a Docker image with metadata. Developers can: +### Local versus remote servers -- Discover tools via Docker Hub under the mcp/ namespace. 
-- Connect tools to their preferred agents with simple configuration through the [MCP Toolkit](toolkit.md) -- Pull and run tools using Docker Desktop or the CLI. +The catalog contains two types of servers based on where they run: -Each catalog entry provides: +Local servers run as containers on your machine. They work offline once +downloaded and offer predictable performance and complete data privacy. Docker +builds and signs all local servers in the catalog. -- Tool description and metadata -- Version history -- Example configuration for agent integration +Remote servers run on the provider's infrastructure and connect to external +services. Many remote servers use OAuth authentication, which the MCP Toolkit +handles automatically through your browser. -## Example: How to use an MCP server from Docker Hub +## Browse the catalog -The following example uses the Puppeteer MCP server to take a screenshot of a website and invert the colors using Claude Desktop. +Browse available MCP servers at [hub.docker.com/mcp](https://hub.docker.com/mcp) +or directly in Docker Desktop: -{{< tabs >}} -{{< tab name="Using the MCP Toolkit (Recommended)" >}} +1. In Docker Desktop, select **MCP Toolkit**. +2. Select the **Catalog** tab to browse available servers. +3. Select a server to view its description, tools, and configuration options. -1. Make sure you have [installed the Docker Desktop Docker MCP Toolkit extension](toolkit.md). -2. From the extension, search for the Puppeteer MCP server in the **MCP Servers** tab, and toggle it on to enable. -3. From the **MCP Clients** tab, select the **Connect** button for Claude Desktop. -4. Within Claude Desktop, submit the following prompt using the Sonnet 3.5 model: +## Add servers to a profile - ```text - Take a screenshot of docs.docker.com and then invert the colors - ``` +To add a server from the catalog to a profile: -{{< /tab >}} -{{< tab name="Manually set it up" >}} +1. In the **Catalog** tab, select the checkbox next to a server. +2. Choose the profile to add it to from the drop-down. -1. Update the `claude_desktop_config.json` file to include the following configuration: +For step-by-step instructions and client connection, see +[Get started with MCP Toolkit](get-started.md) or +[MCP Profiles](profiles.md). - ```json - { - "mcpServers": { - "puppeteer": { - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "DOCKER_CONTAINER", - "mcp/puppeteer" - ], - "env": { - "DOCKER_CONTAINER": "true" - } - } - } - } - ``` -2. Restart Claude Desktop to apply the changed config file. -3. Submit the following prompt using the Sonnet 3.5 model: +## Custom catalogs - ```text - Take a screenshot of docs.docker.com and then invert the colors - ``` +Custom catalogs let you curate focused collections of servers for your team or +organization. Instead of exposing all 300+ servers in the Docker catalog, you +define exactly which servers are available. -Once you've given your consent to use the new tools, Claude spins up the Puppeteer MCP server inside a container, navigates to the target URL, captures and modify the page, and returns the screenshot. 
+Common use cases: -{{< /tab >}} -{{< /tabs >}} +- Restrict which servers your organization approves for use +- Add your organization's private MCP servers alongside public ones +- Control which server versions your team uses +- Define the server set available to AI agents using [Dynamic MCP](dynamic-mcp.md) + +### Custom catalogs with Dynamic MCP + +Custom catalogs work particularly well with +[Dynamic MCP](/ai/mcp-catalog-and-toolkit/dynamic-mcp/), where agents discover +and add MCP servers on-demand during conversations. When you run the gateway +with a custom catalog, the `mcp-find` tool searches only within that catalog. +If your catalog contains 20 servers instead of 300+, agents work within that +focused set, discovering and enabling tools as needed without manual +configuration each time. + +### Import a custom catalog + +If someone on your team has created and published a catalog, you can import it +using its OCI registry reference. + +In Docker Desktop: + +1. Select **MCP Toolkit** and select the **Catalog** tab. +2. Select **Import catalog**. +3. Enter the OCI reference for the catalog (for example, + `registry.example.com/mcp/team-catalog:latest`). +4. Select **Import**. + +Using the CLI: + +```console +$ docker mcp catalog pull +``` + +Once imported, the catalog appears alongside the Docker catalog and you can add +its servers to your profiles. + +### Create and manage custom catalogs + +Creating and managing custom catalogs requires the CLI. See +[Custom catalogs](/manuals/ai/mcp-catalog-and-toolkit/cli.md#custom-catalogs) +in the CLI how-to for step-by-step instructions, including: + +- Curating a subset of the Docker catalog +- Adding private servers to a catalog +- Building a focused catalog from scratch +- Pushing a catalog to a registry for your team to import ## Contribute an MCP server to the catalog -If you would like to add you MCP server to the Docker MCP Catalog, fill out the Docker [MCP submission form](https://www.docker.com/products/mcp-catalog-and-toolkit/#get_updates). \ No newline at end of file +The MCP server registry is available at +https://github.com/docker/mcp-registry. To submit an MCP server, follow the +[contributing guidelines](https://github.com/docker/mcp-registry/blob/main/CONTRIBUTING.md). + +When your pull request is reviewed and approved, your MCP server is available +within 24 hours on: + +- Docker Desktop's [MCP Toolkit feature](toolkit.md). +- The [Docker MCP Catalog](https://hub.docker.com/mcp). +- The [Docker Hub](https://hub.docker.com/u/mcp) `mcp` namespace (for MCP + servers built by Docker). diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/cli.md b/content/manuals/ai/mcp-catalog-and-toolkit/cli.md new file mode 100644 index 00000000000..dd80b53564a --- /dev/null +++ b/content/manuals/ai/mcp-catalog-and-toolkit/cli.md @@ -0,0 +1,385 @@ +--- +title: Use MCP Toolkit from the CLI +linkTitle: Use with CLI +description: Manage MCP profiles, servers, and catalogs using Docker MCP CLI. +keywords: docker mcp, cli, profiles, servers, catalog, gateway +weight: 35 +--- + +{{< summary-bar feature_name="Docker MCP Toolkit" >}} + +> [!NOTE] +> The `docker mcp` commands documented here are available in Docker Desktop +> 4.62 and later. Earlier versions may not support all commands shown. + +The `docker mcp` commands let you manage MCP profiles, servers, OAuth +credentials, and catalogs from the terminal. Use the CLI for scripting, +automation, and headless environments. 
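+
+As a quick sketch, a typical session might create a profile, add a server from
+the Docker catalog, and start the gateway. The profile name and server ID below
+are illustrative; each command is covered in detail in the sections that
+follow:
+
+```console
+# Create a profile, add a catalog server to it, then run the gateway
+$ docker mcp profile create --name web-dev
+$ docker mcp profile server add web-dev \
+  --server catalog://mcp/docker-mcp-catalog/github-official
+$ docker mcp gateway run --profile web-dev
+```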
+
+## Profiles
+
+### Create a profile
+
+```console
+$ docker mcp profile create --name <name>
+```
+
+The profile ID is used to reference the profile in subsequent commands:
+
+```console
+$ docker mcp profile create --name web-dev
+```
+
+### List profiles
+
+```console
+$ docker mcp profile list
+```
+
+### View a profile
+
+```console
+$ docker mcp profile show <profile-id>
+```
+
+### Remove a profile
+
+```console
+$ docker mcp profile remove <profile-id>
+```
+
+> [!CAUTION]
+> Removing a profile deletes all its server configurations and settings. This
+> action can't be undone.
+
+## Servers
+
+### Browse the catalog
+
+List available servers and their IDs:
+
+```console
+$ docker mcp catalog server ls mcp/docker-mcp-catalog
+```
+
+The output lists each server by name. The name (for example, `playwright` or
+`github-official`) is the server ID to use in `catalog://` URIs.
+
+To look up a server ID in Docker Desktop, open **MCP Toolkit** > **Catalog**,
+select a server, and check the **Server ID** field.
+
+### Add servers to a profile
+
+Servers are referenced by URI. The URI format depends on where the server
+comes from:
+
+| Format                                    | Source                     |
+| ----------------------------------------- | -------------------------- |
+| `catalog://<catalog>/<server-id>`         | An OCI catalog             |
+| `docker://<image>:<tag>`                  | A Docker image             |
+| `https://<registry>/v0/servers/<server-id>` | The MCP community registry |
+| `file://<path>`                           | A local YAML or JSON file  |
+
+The most common format is `catalog://<catalog>/<server-id>`, where `<catalog>`
+matches the **Catalog** field and `<server-id>` matches the **Server ID** field
+shown in Docker Desktop or in the `catalog server ls` output:
+
+```console
+$ docker mcp profile server add <profile-id> \
+  --server catalog://<catalog>/<server-id>
+```
+
+Add multiple servers in one command:
+
+```console
+$ docker mcp profile server add web-dev \
+  --server catalog://mcp/docker-mcp-catalog/github-official \
+  --server catalog://mcp/docker-mcp-catalog/playwright
+```
+
+To add a server defined in a local YAML file:
+
+```console
+$ docker mcp profile server add my-profile \
+  --server file://./my-server.yaml
+```
+
+The YAML file defines the server image and configuration:
+
+```yaml
+name: my-server
+title: My Server
+type: server
+image: myimage:latest
+description: Description of the server
+```
+
+If the server requires OAuth authentication, authorize it in Docker Desktop
+after adding. See [OAuth authentication](/manuals/ai/mcp-catalog-and-toolkit/toolkit.md#oauth-authentication).
+
+### List servers
+
+List all servers across all profiles:
+
+```console
+$ docker mcp profile server ls
+```
+
+Filter by profile:
+
+```console
+$ docker mcp profile server ls --filter profile=web-dev
+```
+
+### Remove a server
+
+```console
+$ docker mcp profile server remove <profile-id> --name <server-name>
+```
+
+Remove multiple servers at once:
+
+```console
+$ docker mcp profile server remove web-dev \
+  --name github-official \
+  --name playwright
+```
+
+### Configure server settings
+
+Set and retrieve configuration values for servers in a profile:
+
+```console
+$ docker mcp profile config --set <server>.<key>=<value>
+$ docker mcp profile config --get-all
+$ docker mcp profile config --del <server>.<key>
+```
+
+Server configuration keys and their expected values are defined by each server.
+Check the server's documentation or its entry in Docker Desktop under
+**MCP Toolkit** > **Catalog** > **Configuration**.
+
+## Gateway
+
+Run the MCP Gateway with a specific profile:
+
+```console
+$ docker mcp gateway run --profile <profile-name>
+```
+
+Omit `--profile` to use the default profile.
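+
+The gateway can also verify the attestations of Docker-built server images at
+startup, as noted in the [security FAQs](/manuals/ai/mcp-catalog-and-toolkit/faqs.md):
+
+```console
+$ docker mcp gateway run --verify-signatures
+```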
+ +### Connect a client manually + +To connect any client that isn't listed in Docker Desktop, configure it to run +the gateway over `stdio`. For example, in a JSON-based client configuration: + +```json +{ + "servers": { + "MCP_DOCKER": { + "command": "docker", + "args": ["mcp", "gateway", "run", "--profile", "web-dev"], + "type": "stdio" + } + } +} +``` + +For Claude Desktop, the format is: + +```json +{ + "mcpServers": { + "MCP_DOCKER": { + "command": "docker", + "args": ["mcp", "gateway", "run", "--profile", "web-dev"] + } + } +} +``` + +### Connect a named client + +Connect a supported client to a profile: + +```console +$ docker mcp client connect --profile +``` + +For example, to connect VS Code to a project-specific profile: + +```console +$ docker mcp client connect vscode --profile my-project +``` + +This creates a `.vscode/mcp.json` file in the current directory. Because this +is a user-specific file, add it to `.gitignore`: + +```console +$ echo ".vscode/mcp.json" >> .gitignore +``` + +## Share profiles + +Share profiles with your team using OCI registries or version control. + +### Share via OCI registry + +Profiles are shared as OCI artifacts via any OCI-compatible registry. +Credentials are not included for security reasons. Team members configure +authentication credentials separately after pulling. + +To push an existing profile called `web-dev` to an OCI registry: + +```console +$ docker mcp profile push web-dev registry.example.com/profiles/web-dev:v1 +``` + +To pull the same profile: + +```console +$ docker mcp profile pull registry.example.com/profiles/team-standard:latest +``` + +### Share via version control + +For project-specific profiles, you can use the `export` and `import` commands +and store the profiles in version control alongside your code. Team members can +import the file to get the same configuration. + +To export a profile to your project directory: + +```console +$ mkdir -p .docker +$ docker mcp profile export web-dev .docker/mcp-profile.json +``` + +Team members who clone the repository can import the profile: + +```console +$ docker mcp profile import .docker/mcp-profile.json +``` + +This creates a profile with the servers and configuration defined in the +file. Any authentication credentials must be configured separately if needed. + +## Custom catalogs + +Custom catalogs let you curate a focused collection of servers for your team +or organization. For an overview of what custom catalogs are and when to use +them, see [Custom catalogs](/manuals/ai/mcp-catalog-and-toolkit/catalog.md#custom-catalogs). + +Catalogs are referenced by OCI reference, for example +`registry.example.com/mcp/my-catalog:latest`. Servers within a catalog use +the same URI schemes as when +[adding servers to a profile](#add-servers-to-a-profile). + +### Customize the Docker catalog + +Use the Docker catalog as a base, then add or remove servers to fit your +organization's needs. 
Copy it first:
+
+```console
+$ docker mcp catalog tag mcp/docker-mcp-catalog \
+  registry.example.com/mcp/company-tools:latest
+```
+
+List the servers it contains:
+
+```console
+$ docker mcp catalog server ls registry.example.com/mcp/company-tools:latest
+```
+
+Remove servers your organization doesn't approve:
+
+```console
+$ docker mcp catalog server remove \
+  registry.example.com/mcp/company-tools:latest \
+  --name <server-name>
+```
+
+Add your own private servers, packaged as Docker images:
+
+```console
+$ docker mcp catalog server add registry.example.com/mcp/company-tools:latest \
+  --server docker://registry.example.com/mcp/internal-api:latest \
+  --server docker://registry.example.com/mcp/data-pipeline:latest
+```
+
+Push when ready:
+
+```console
+$ docker mcp catalog push registry.example.com/mcp/company-tools:latest
+```
+
+### Build a catalog from scratch
+
+To include exactly what you choose and nothing else, create a catalog from
+scratch. You can include servers from the Docker catalog, your own private
+images, or both.
+
+Create a catalog and specify which servers to include:
+
+```console
+$ docker mcp catalog create registry.example.com/mcp/data-tools:latest \
+  --title "Data Analysis Tools" \
+  --server catalog://mcp/docker-mcp-catalog/sequentialthinking \
+  --server catalog://mcp/docker-mcp-catalog/brave \
+  --server docker://registry.example.com/mcp/analytics:latest
+```
+
+View the result:
+
+```console
+$ docker mcp catalog show registry.example.com/mcp/data-tools:latest
+```
+
+Push to distribute:
+
+```console
+$ docker mcp catalog push registry.example.com/mcp/data-tools:latest
+```
+
+### Distribute a catalog
+
+Push your catalog so team members can import it:
+
+```console
+$ docker mcp catalog push <oci-reference>
+```
+
+Team members can pull it using the CLI:
+
+```console
+$ docker mcp catalog pull <oci-reference>
+```
+
+Or import it using Docker Desktop: select **MCP Toolkit** > **Catalog** >
+**Import catalog** and enter the OCI reference.
+
+### Use a custom catalog with the gateway
+
+Run the gateway with your catalog instead of the default Docker catalog:
+
+```console
+$ docker mcp gateway run --catalog <oci-reference>
+```
+
+For [Dynamic MCP](/manuals/ai/mcp-catalog-and-toolkit/dynamic-mcp.md), where
+agents discover and add servers during conversations, this limits what agents
+can find to your curated set.
+
+To enable specific servers from your catalog without using a profile:
+
+```console
+$ docker mcp gateway run --catalog <oci-reference> \
+  --servers <server-one> --servers <server-two>
+```
+
+## Further reading
+
+- [Get started with MCP Toolkit](/manuals/ai/mcp-catalog-and-toolkit/get-started.md)
+- [MCP Profiles](/manuals/ai/mcp-catalog-and-toolkit/profiles.md)
+- [MCP Catalog](/manuals/ai/mcp-catalog-and-toolkit/catalog.md)
+- [MCP Gateway](/manuals/ai/mcp-catalog-and-toolkit/mcp-gateway.md)
diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/dynamic-mcp.md b/content/manuals/ai/mcp-catalog-and-toolkit/dynamic-mcp.md
new file mode 100644
index 00000000000..4194002b1bc
--- /dev/null
+++ b/content/manuals/ai/mcp-catalog-and-toolkit/dynamic-mcp.md
@@ -0,0 +1,152 @@
+---
+title: Dynamic MCP
+linkTitle: Dynamic discovery
+description: Discover and add MCP servers on-demand using natural language with Dynamic MCP servers
+keywords: dynamic mcps, mcp discovery, mcp-find, mcp-add, code-mode, ai agents, model context protocol
+weight: 40
+---
+
+Dynamic MCP enables AI agents to discover and add MCP servers on-demand during
+a conversation, without manual configuration.
Instead of pre-configuring every +MCP server before starting your agent session, clients can search the +[MCP Catalog](/manuals/ai/mcp-catalog-and-toolkit/catalog.md) and add servers +as needed. + +This capability is enabled automatically when you connect an MCP client to the +[MCP Toolkit](/manuals/ai/mcp-catalog-and-toolkit/toolkit.md). The gateway +provides a set of primordial tools that agents use to discover and manage +servers during runtime. + +{{% experimental %}} + +Dynamic MCP is an experimental feature in early development. While you're +welcome to try it out and explore its capabilities, you may encounter +unexpected behavior or limitations. Feedback is welcome via at [GitHub +issues](https://github.com/docker/mcp-gateway/issues) for bug reports and +[GitHub discussions](https://github.com/docker/mcp-gateway/discussions) for +general questions and feature requests. + +{{% /experimental %}} + +## How it works + +When you connect a client to the MCP Gateway, the gateway exposes a small set +of management tools alongside any MCP servers in your active profile. These +management tools let agents interact with the gateway's configuration: + +| Tool | Description | +| ---------------- | ------------------------------------------------------------------------ | +| `mcp-find` | Search for MCP servers in the catalog by name or description | +| `mcp-add` | Add a new MCP server to the current session | +| `mcp-config-set` | Configure settings for an MCP server | +| `mcp-remove` | Remove an MCP server from the session | +| `mcp-exec` | Execute a tool by name that exists in the current session | +| `code-mode` | Create a JavaScript-enabled tool that combines multiple MCP server tools | + +With these tools available, an agent can search the catalog, add servers, +handle authentication, and use newly added tools directly without requiring a +restart or manual configuration. + +Dynamically added servers and tools are associated with your _current session +only_. They're not persisted to your profile. When you start a new session, +only servers you've added to your profile through the +[MCP Toolkit](/manuals/ai/mcp-catalog-and-toolkit/toolkit.md) or +[Profiles](/manuals/ai/mcp-catalog-and-toolkit/profiles.md) are available. + +## Prerequisites + +To use Dynamic MCP, you need: + +- Docker Desktop version 4.50 or later, with [MCP Toolkit](/manuals/ai/mcp-catalog-and-toolkit/toolkit.md) enabled +- An LLM application that supports MCP (such as Claude Desktop, Visual Studio Code, or Claude Code) +- Your client configured to connect to the MCP Gateway + +See [Get started with Docker MCP Toolkit](/manuals/ai/mcp-catalog-and-toolkit/get-started.md) +for setup instructions. + +## Usage + +Dynamic MCP is enabled automatically when you use the MCP Toolkit. Your +connected clients can now use `mcp-find`, `mcp-add`, and other management tools +during conversations. + +To see Dynamic MCP in action, connect your AI client to the Docker MCP Toolkit +and try this prompt: + +```plaintext +What MCP servers can I use for working with SQL databases? +``` + +Given this prompt, your agent will use the `mcp-find` tool provided by MCP +Toolkit to search for SQL-related servers in the [MCP Catalog](./catalog.md). 
+ +And to add a server to a session, simply write a prompt and the MCP Toolkit +takes care of installing and running the server: + +```plaintext +Add the postgres mcp server +``` + +## Tool composition with code mode + +The `code-mode` tool is available as an experimental capability for creating +custom JavaScript functions that combine multiple MCP server tools. The +intended use case is to enable workflows that coordinate multiple services +in a single operation. + +> **Note** +> +> Code mode is in early development and is not yet reliable for general use. +> The documentation intentionally omits usage examples at this time. +> +> The core Dynamic MCP capabilities (`mcp-find`, `mcp-add`, `mcp-config-set`, +> `mcp-remove`) work as documented and are the recommended focus for current +> use. + +The architecture works as follows: + +1. The agent calls `code-mode` with a list of server names and a tool name +2. The gateway creates a sandbox with access to those servers' tools +3. A new tool is registered in the current session with the specified name +4. The agent calls the newly created tool +5. The code executes in the sandbox with access to the specified tools +6. Results are returned to the agent + +The sandbox can only interact with the outside world through MCP tools, +which are already running in isolated containers with restricted privileges. + +## Security considerations + +Dynamic MCP maintains the same security model as static MCP server +configuration in MCP Toolkit: + +- All servers in the MCP Catalog are built, signed, and maintained by Docker +- Servers run in isolated containers with restricted resources +- Code mode runs agent-written JavaScript in an isolated sandbox that can only + interact through MCP tools +- Credentials are managed by the gateway and injected securely into containers + +The key difference with dynamic capabilities is that agents can add new tools +during runtime. + +## Disabling Dynamic MCP + +Dynamic MCP is enabled by default in the MCP Toolkit. If you prefer to use only +statically configured MCP servers, you can disable the dynamic tools feature: + +```console +$ docker mcp feature disable dynamic-tools +``` + +To re-enable the feature later: + +```console +$ docker mcp feature enable dynamic-tools +``` + +After changing this setting, you may need to restart any connected MCP clients. + +## Further reading + +Check out the [Dynamic MCP servers with Docker](https://docker.com/blog) blog +post for more examples and inspiration on how you can use dynamic tools. diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/e2b-sandboxes.md b/content/manuals/ai/mcp-catalog-and-toolkit/e2b-sandboxes.md new file mode 100644 index 00000000000..b3e2b0c1ff2 --- /dev/null +++ b/content/manuals/ai/mcp-catalog-and-toolkit/e2b-sandboxes.md @@ -0,0 +1,452 @@ +--- +title: E2B sandboxes +description: Cloud-based secure sandboxes for AI agents with built-in Docker MCP Gateway integration +keywords: E2B, cloud sandboxes, MCP Gateway, AI agents, MCP Catalog +aliases: + - /ai/mcp-catalog-and-toolkit/sandboxes/ +--- + +Docker has partnered with [E2B](https://e2b.dev/), a provider of secure cloud sandboxes for AI agents. E2B sandboxes include direct access to Docker's [MCP Catalog](https://hub.docker.com/mcp), a collection of 200+ tools from publishers including GitHub, Notion, and Stripe. + +When you create a sandbox, you specify which MCP tools it should access. E2B launches these tools and provides access through the Docker MCP Gateway. 
+ +## Example: Using GitHub and Notion MCP server + +This example demonstrates how to connect multiple MCP servers in an E2B sandbox. You'll analyze data in Notion and create GitHub issues using Claude. + +### Prerequisites + +Before you begin, make sure you have the following: + +- [E2B account](https://e2b.dev/docs/quickstart) with API access +- Anthropic API key for Claude + + > [!NOTE] + > This example uses Claude Code, which is pre-installed in E2B sandboxes. + > However, you can adapt the example to work with other AI assistants of your + > choice. See [E2B's MCP documentation](https://e2b.dev/docs/mcp/quickstart) + > for alternative connection methods. + +- Node.js 18+ installed on your machine +- Notion account with: + - A database containing sample data + - [Integration token](https://www.notion.com/help/add-and-manage-connections-with-the-api) +- GitHub account with: + - A repository for testing + - Personal access token with `repo` scope + +### Set up your environment + +Create a new directory and initialize a Node.js project: + +```console +$ mkdir mcp-e2b-quickstart +$ cd mcp-e2b-quickstart +$ npm init -y +``` + +Configure your project for ES modules by updating `package.json`: + +```json +{ + "name": "mcp-e2b-quickstart", + "version": "1.0.0", + "type": "module", + "scripts": { + "start": "node index.js" + } +} +``` + +Install required dependencies: + +```console +$ npm install e2b dotenv +``` + +Create a `.env` file with your credentials: + +```console +$ cat > .env << 'EOF' +E2B_API_KEY=your_e2b_api_key_here +ANTHROPIC_API_KEY=your_anthropic_api_key_here +NOTION_INTEGRATION_TOKEN=ntn_your_notion_integration_token_here +GITHUB_TOKEN=ghp_your_github_pat_here +EOF +``` + +Protect your credentials: + +```console +$ echo ".env" >> .gitignore +$ echo "node_modules/" >> .gitignore +``` + +### Create an E2B sandbox with MCP servers + +{{< tabs group="" >}} +{{< tab name="Typescript">}} + +Create a file named `index.ts`: + +```typescript +import "dotenv/config"; +import { Sandbox } from "e2b"; + +async function quickstart(): Promise { + console.log("Creating E2B sandbox with Notion and GitHub MCP servers...\n"); + + const sbx: Sandbox = await Sandbox.create({ + envs: { + ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY as string, + }, + mcp: { + notion: { + internalIntegrationToken: process.env + .NOTION_INTEGRATION_TOKEN as string, + }, + githubOfficial: { + githubPersonalAccessToken: process.env.GITHUB_TOKEN as string, + }, + }, + }); + + const mcpUrl = sbx.getMcpUrl(); + const mcpToken = await sbx.getMcpToken(); + + console.log("Sandbox created successfully!"); + console.log(`MCP Gateway URL: ${mcpUrl}\n`); + + // Wait for MCP initialization + await new Promise((resolve) => setTimeout(resolve, 1000)); + + // Connect Claude to MCP gateway + console.log("Connecting Claude to MCP gateway..."); + await sbx.commands.run( + `claude mcp add --transport http e2b-mcp-gateway ${mcpUrl} --header "Authorization: Bearer ${mcpToken}"`, + { + timeoutMs: 0, + onStdout: console.log, + onStderr: console.log, + }, + ); + + console.log("\nConnection successful! 
Cleaning up..."); + await sbx.kill(); +} + +quickstart().catch(console.error); +``` + +Run the script: + +```console +$ npx tsx index.ts +``` + +{{< /tab >}} +{{< tab name="Python">}} + +Create a file named `index.py`: + +```python +import os +import asyncio +from dotenv import load_dotenv +from e2b import Sandbox + +load_dotenv() + +async def quickstart(): + print("Creating E2B sandbox with Notion and GitHub MCP servers...\n") + + sbx = await Sandbox.beta_create( + envs={ + "ANTHROPIC_API_KEY": os.getenv("ANTHROPIC_API_KEY"), + }, + mcp={ + "notion": { + "internalIntegrationToken": os.getenv("NOTION_INTEGRATION_TOKEN"), + }, + "githubOfficial": { + "githubPersonalAccessToken": os.getenv("GITHUB_TOKEN"), + }, + }, + ) + + mcp_url = sbx.beta_get_mcp_url() + mcp_token = await sbx.beta_get_mcp_token() + + print("Sandbox created successfully!") + print(f"MCP Gateway URL: {mcp_url}\n") + + # Wait for MCP initialization + await asyncio.sleep(1) + + # Connect Claude to MCP gateway + print("Connecting Claude to MCP gateway...") + + def on_stdout(output): + print(output, end='') + + def on_stderr(output): + print(output, end='') + + await sbx.commands.run( + f'claude mcp add --transport http e2b-mcp-gateway {mcp_url} --header "Authorization: Bearer {mcp_token}"', + timeout_ms=0, + on_stdout=on_stdout, + on_stderr=on_stderr + ) + + print("\nConnection successful! Cleaning up...") + await sbx.kill() + +if __name__ == "__main__": + try: + asyncio.run(quickstart()) + except Exception as e: + print(f"Error: {e}") + +``` + +Run the script: + +```console +$ python index.py +``` + +{{< /tab >}} +{{}} + +You should see: + +```console +Creating E2B sandbox with Notion and GitHub MCP servers... + +Sandbox created successfully! +MCP Gateway URL: https://50005-xxxxx.e2b.app/mcp + +Connecting Claude to MCP gateway... +Added HTTP MCP server e2b-mcp-gateway with URL: https://50005-xxxxx.e2b.app/mcp + +Connection successful! Cleaning up... +``` + +### Test with example workflow + +Now, test the setup by running a simple workflow that searches Notion and creates a GitHub issue. + +{{< tabs group="" >}} +{{< tab name="Typescript">}} + +> [!IMPORTANT] +> +> Replace `owner/repo` in the prompt with your actual GitHub username and repository +> name (for example, `yourname/test-repo`). + +Update `index.ts` with the following example: + +```typescript +import "dotenv/config"; +import { Sandbox } from "e2b"; + +async function exampleWorkflow(): Promise { + console.log("Creating sandbox...\n"); + + const sbx: Sandbox = await Sandbox.create({ + envs: { + ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY as string, + }, + mcp: { + notion: { + internalIntegrationToken: process.env + .NOTION_INTEGRATION_TOKEN as string, + }, + githubOfficial: { + githubPersonalAccessToken: process.env.GITHUB_TOKEN as string, + }, + }, + }); + + const mcpUrl = sbx.getMcpUrl(); + const mcpToken = await sbx.getMcpToken(); + + console.log("Sandbox created successfully\n"); + + // Wait for MCP servers to initialize + await new Promise((resolve) => setTimeout(resolve, 3000)); + + console.log("Connecting Claude to MCP gateway...\n"); + await sbx.commands.run( + `claude mcp add --transport http e2b-mcp-gateway ${mcpUrl} --header "Authorization: Bearer ${mcpToken}"`, + { + timeoutMs: 0, + onStdout: console.log, + onStderr: console.log, + }, + ); + + console.log("\nRunning example: Search Notion and create GitHub issue...\n"); + + const prompt: string = `Using Notion and GitHub MCP tools: +1. Search my Notion workspace for databases +2. 
Create a test issue in owner/repo titled "MCP Toolkit Test" with description "Testing E2B + Docker MCP integration" +3. Confirm both operations completed successfully`; + + await sbx.commands.run( + `echo '${prompt.replace(/'/g, "'\\''")}' | claude -p --dangerously-skip-permissions`, + { + timeoutMs: 0, + onStdout: console.log, + onStderr: console.log, + }, + ); + + await sbx.kill(); +} + +exampleWorkflow().catch(console.error); +``` + +Run the script: + +```console +$ npx tsx index.ts +``` + +{{< /tab >}} +{{< tab name="Python">}} + +Update `index.py` with this example: + +> [!IMPORTANT] +> +> Replace `owner/repo` in the prompt with your actual GitHub username and repository +> name (for example, `yourname/test-repo`). + +```python +import os +import asyncio +import shlex +from dotenv import load_dotenv +from e2b import Sandbox + +load_dotenv() + +async def example_workflow(): + print("Creating sandbox...\n") + + sbx = await Sandbox.beta_create( + envs={ + "ANTHROPIC_API_KEY": os.getenv("ANTHROPIC_API_KEY"), + }, + mcp={ + "notion": { + "internalIntegrationToken": os.getenv("NOTION_INTEGRATION_TOKEN"), + }, + "githubOfficial": { + "githubPersonalAccessToken": os.getenv("GITHUB_TOKEN"), + }, + }, + ) + + mcp_url = sbx.beta_get_mcp_url() + mcp_token = await sbx.beta_get_mcp_token() + + print("Sandbox created successfully\n") + + # Wait for MCP servers to initialize + await asyncio.sleep(3) + + print("Connecting Claude to MCP gateway...\n") + + def on_stdout(output): + print(output, end='') + + def on_stderr(output): + print(output, end='') + + await sbx.commands.run( + f'claude mcp add --transport http e2b-mcp-gateway {mcp_url} --header "Authorization: Bearer {mcp_token}"', + timeout_ms=0, + on_stdout=on_stdout, + on_stderr=on_stderr + ) + + print("\nRunning example: Search Notion and create GitHub issue...\n") + + prompt = """Using Notion and GitHub MCP tools: +1. Search my Notion workspace for databases +2. Create a test issue in owner/repo titled "MCP Toolkit Test" with description "Testing E2B + Docker MCP integration" +3. Confirm both operations completed successfully""" + + # Escape single quotes for shell + escaped_prompt = prompt.replace("'", "'\\''") + + await sbx.commands.run( + f"echo '{escaped_prompt}' | claude -p --dangerously-skip-permissions", + timeout_ms=0, + on_stdout=on_stdout, + on_stderr=on_stderr + ) + + await sbx.kill() + +if __name__ == "__main__": + try: + asyncio.run(example_workflow()) + except Exception as e: + print(f"Error: {e}") +``` + +Run the script: + +```console +$ python workflow.py +``` + +{{< /tab >}} +{{}} + +You should see: + +```console +Creating sandbox... + +Running example: Search Notion and create GitHub issue... + +## Task Completed Successfully + +I've completed both operations using the Notion and GitHub MCP tools: + +### 1. Notion Workspace Search + +Found 3 databases in your Notion workspace: +- **Customer Feedback** - Database with 12 entries tracking feature requests +- **Product Roadmap** - Planning database with 8 active projects +- **Meeting Notes** - Shared workspace with 45 pages + +### 2. GitHub Issue Creation + +Successfully created test issue: +- **Repository**: your-org/your-repo +- **Issue Number**: #47 +- **Title**: "MCP Test" +- **Description**: "Testing E2B + Docker MCP integration" +- **Status**: Open +- **URL**: https://github.com/your-org/your-repo/issues/47 + +Both operations completed successfully. The MCP servers are properly configured and working. 
+``` + +The sandbox connected multiple MCP servers and orchestrated a workflow across Notion and GitHub. You can extend this pattern to combine any of the 200+ MCP servers in the Docker MCP Catalog. + +## Related pages + +- [How to build an AI-powered code quality workflow with SonarQube and E2B](/guides/github-sonarqube-sandbox.md) +- [Docker + E2B: Building the Future of Trusted AI](https://www.docker.com/blog/docker-e2b-building-the-future-of-trusted-ai/) +- [Docker Sandboxes](/manuals/ai/sandboxes/_index.md) +- [Docker MCP Toolkit and Catalog](/manuals/ai/mcp-catalog-and-toolkit/_index.md) +- [Docker MCP Gateway](/manuals/ai/mcp-catalog-and-toolkit/mcp-gateway.md) +- [E2B MCP documentation](https://e2b.dev/docs/mcp) diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/faqs.md b/content/manuals/ai/mcp-catalog-and-toolkit/faqs.md new file mode 100644 index 00000000000..6d803b1c97d --- /dev/null +++ b/content/manuals/ai/mcp-catalog-and-toolkit/faqs.md @@ -0,0 +1,114 @@ +--- +title: MCP Toolkit FAQs +linkTitle: FAQs +description: Frequently asked questions related to MCP Catalog and Toolkit security +keywords: MCP, Toolkit, MCP server, MCP client, security, faq +tags: [FAQ] +weight: 70 +--- + +Docker MCP Catalog and Toolkit is a solution for securely building, sharing, and +running MCP tools. This page answers common questions about MCP Catalog and Toolkit security. + +### What process does Docker follow to add a new MCP server to the catalog? + +Developers can submit a pull request to the [Docker MCP Registry](https://github.com/docker/mcp-registry) to propose new servers. Docker provides detailed [contribution guidelines](https://github.com/docker/mcp-registry/blob/main/CONTRIBUTING.md) to help developers meet the required standards. + +Currently, a majority of the servers in the catalog are built directly by Docker. Each server includes attestations such as: + +- Build attestation: Servers are built on Docker Build Cloud. +- Source provenance: Verifiable source code origins. +- Signed SBOMs: Software Bill of Materials with cryptographic signatures. + +> [!NOTE] +> When using the images with [Docker MCP gateway](/manuals/ai/mcp-catalog-and-toolkit/mcp-gateway.md), +> you can verify attestations at runtime using the `docker mcp gateway run +--verify-signatures` CLI command. + + +In addition to Docker-built servers, the catalog includes select servers from trusted registries such as GitHub and HashiCorp. Each third-party server undergoes a verification process that includes: + +- Pulling and building the code in an ephemeral build environment. +- Testing initialization and functionality. +- Verifying that tools can be successfully listed. + +### Under what conditions does Docker reject MCP server submissions? + +Docker rejects MCP server submissions that fail automated testing and validation processes during pull request review. Additionally, Docker reviewers evaluate submissions against specific requirements and reject MCP servers that don't meet these criteria. + +### Does Docker take accountability for malicious MCP servers in the Toolkit? + +Docker’s security measures currently represent a best-effort approach. While Docker implements automated testing, scanning, and metadata extraction for each server in the catalog, these security measures are not yet exhaustive. Docker is actively working to enhance its security processes and expand testing coverage. Enterprise customers can contact their Docker account manager for specific security requirements and implementation details. 
+ +### How are credentials managed for MCP servers? + +Starting with Docker Desktop version 4.43.0, credentials are stored securely in the Docker Desktop VM. The storage implementation depends on the platform (for example, macOS, WSL2). You can manage the credentials using the following CLI commands: + +- `docker mcp secret ls` - List stored credentials +- `docker mcp secret rm` - Remove specific credentials +- `docker mcp oauth revoke` - Revoke OAuth-based credentials + +In the upcoming versions of Docker Desktop, Docker plans to support pluggable storage for these secrets and additional out-of-the-box storage providers to give users more flexibility in managing credentials. + +### Are credentials removed when an MCP server is uninstalled? + +No. MCP servers are not technically uninstalled since they exist as Docker containers pulled to your local Docker Desktop. Removing an MCP server stops the container but leaves the image on your system. Even if the container is deleted, credentials remain stored until you remove them manually. + +### Why don't I see remote MCP servers in the catalog? + +If remote MCP servers aren't visible in the Docker Desktop catalog, your local +catalog may be out of date. Remote servers are indicated by a cloud icon and +include services like GitHub, Notion, and Linear. + +Update your catalog by running: + +```console +$ docker mcp catalog update +``` + +After the update completes, refresh the **Catalog** tab in Docker Desktop. + +### What's the difference between profiles and the catalog? + +The [catalog](/manuals/ai/mcp-catalog-and-toolkit/catalog.md) is the source of +available MCP servers - a library of tools you can choose from. +[Profiles](/manuals/ai/mcp-catalog-and-toolkit/profiles.md) are collections of +servers you've added to organize your work. Think of the catalog as a library, +and profiles as your personal bookshelves containing the books you've selected +for different purposes. + +### Can I share profiles with my team? + +Yes. Profiles can be pushed to OCI-compliant registries using +`docker mcp profile push my-profile registry.example.com/profiles/my-profile:v1`. +Team members can pull your profile with +`docker mcp profile pull registry.example.com/profiles/my-profile:v1`. Note +that credentials aren't included in shared profiles for security reasons - team +members need to configure OAuth and other credentials separately. + +### Do I need to create a profile to use MCP Toolkit? + +Yes, MCP Toolkit requires a profile to run servers. If you're upgrading from a +version before profiles were introduced, a default profile is automatically +created for you with your existing server configurations. You can create +additional named profiles to organize servers for different projects or +environments. + +### What happens to servers when I switch profiles? + +Each profile contains its own set of servers and configurations. When you run +the gateway with `--profile profile-name`, only servers in that profile are +available to clients. The default profile is used when no profile is specified. +Switching between profiles changes which servers your AI applications can +access. + +### Can I use the same server in multiple profiles? + +Yes. You can add the same MCP server to multiple profiles, each with different +configurations if needed. This is useful when you need the same server with +different settings for different projects or environments. 
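+
+For example, using the CLI you could add the same GitHub server to two
+different profiles. The profile names here are illustrative; see
+[Use MCP Toolkit from the CLI](/manuals/ai/mcp-catalog-and-toolkit/cli.md) for
+details on these commands:
+
+```console
+$ docker mcp profile server add web-dev \
+  --server catalog://mcp/docker-mcp-catalog/github-official
+$ docker mcp profile server add backend \
+  --server catalog://mcp/docker-mcp-catalog/github-official
+```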
+ +## Related pages + +- [Get started with MCP Toolkit](/manuals/ai/mcp-catalog-and-toolkit/get-started.md) +- [Open-source MCP Gateway](/manuals/ai/mcp-catalog-and-toolkit/mcp-gateway.md) diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/get-started.md b/content/manuals/ai/mcp-catalog-and-toolkit/get-started.md new file mode 100644 index 00000000000..4c2f8e8e691 --- /dev/null +++ b/content/manuals/ai/mcp-catalog-and-toolkit/get-started.md @@ -0,0 +1,388 @@ +--- +title: Get started with Docker MCP Toolkit +linkTitle: Get started +description: Learn how to quickly install and use the MCP Toolkit to set up servers and clients. +keywords: Docker MCP Toolkit, MCP server, MCP client, AI agents +weight: 5 +params: + test_prompt: Use the GitHub MCP server to show me my open pull requests +--- + +{{< summary-bar feature_name="Docker MCP Toolkit" >}} + +> [!NOTE] +> This page describes the MCP Toolkit interface in Docker Desktop 4.62 and +> later. Earlier versions have a different UI. Upgrade to follow these +> instructions exactly. + +The Docker MCP Toolkit makes it easy to set up, manage, and run containerized +Model Context Protocol (MCP) servers in profiles, and connect them to AI +agents. It provides secure defaults and support for a growing ecosystem of +LLM-based clients. This page shows you how to get started quickly with the +Docker MCP Toolkit. + +## Setup + +Before you begin, make sure you meet the following requirements to get started with Docker MCP Toolkit. + +1. Download and install the latest version of [Docker Desktop](/get-started/get-docker/). +2. Open the Docker Desktop settings and select **Beta features**. +3. Select **Enable Docker MCP Toolkit**. +4. Select **Apply**. + +The **Learning center** in Docker Desktop provides walkthroughs and resources +to help you get started with Docker products and features. On the **MCP +Toolkit** page, the **Get started** walkthrough guides you through installing +an MCP server, connecting a client, and testing your setup. + +Alternatively, follow the step-by-step instructions on this page: + +- [Create a profile](#create-a-profile) - Your workspace for organizing servers +- [Add MCP servers to your profile](#add-mcp-servers) - Select tools from the catalog +- [Connect clients](#connect-clients) - Link AI applications to your profile +- [Verify connections](#verify-connections) - Test that everything works + +Once configured, your AI applications can use all the servers in your profile. + +> [!TIP] +> Prefer working from the terminal? See [Use MCP Toolkit from the CLI](cli.md) +> for instructions on using the `docker mcp` commands. + +## Create a profile + +Profiles organize your MCP servers into collections. Create a profile for your +work: + +> [!NOTE] +> If you're upgrading from a previous version of MCP Toolkit, your existing +> server configurations are already in a `default` profile. You can continue +> using the default profile or create new profiles for different projects. + +1. In Docker Desktop, select **MCP Toolkit** and select the **Profiles** tab. +2. Select **Create profile**. +3. Enter a name for your profile (e.g., "Frontend development"). +4. Optionally, add servers and clients now, or add them later. +5. Select **Create**. + +Your new profile appears in the profiles list. + +## Add MCP servers + +1. In Docker Desktop, select **MCP Toolkit** and select the **Catalog** tab. +2. Browse the catalog and select the servers you want to add. +3. 
Select the **Add to** button and choose whether you want to add the servers + to an existing profile, or create a new profile. + +If a server requires configuration, a **Configuration Required** badge appears +next to the server's name. You must complete the mandatory configuration before +you can use the server. + +You've now successfully added MCP servers to your profile. Next, connect an MCP +client to use the servers in your profile. + +## Connect clients + +To connect a client to MCP Toolkit: + +1. In Docker Desktop, select **MCP Toolkit** and select the **Clients** tab. +2. Find your application in the list. +3. Select **Connect** to configure the client. + +If your client isn't listed, you can connect the MCP Toolkit manually over +`stdio` by configuring your client to run the gateway with your profile: + +```plaintext +docker mcp gateway run --profile my_profile +``` + +For example, if your client uses a JSON file to configure MCP servers, you may +add an entry like: + +```json {title="Example configuration" +{ + "servers": { + "MCP_DOCKER": { + "command": "docker", + "args": ["mcp", "gateway", "run", "--profile", "my_profile"], + "type": "stdio" + } + } +} +``` + +Consult the documentation of the application you're using for instructions on +how to set up MCP servers manually. + +## Verify connections + +Refer to the relevant section for instructions on how to verify that your setup +is working: + +- [Claude Code](#claude-code) +- [Claude Desktop](#claude-desktop) +- [OpenAI Codex](#codex) +- [Continue](#continue) +- [Cursor](#cursor) +- [Gemini](#gemini) +- [Goose](#goose) +- [LM Studio](#lm-studio) +- [OpenCode](#opencode) +- [Sema4.ai](#sema4) +- [Visual Studio Code](#vscode) +- [Zed](#zed) + +### Claude Code + +If you configured the MCP Toolkit for a specific project, navigate to the +relevant project directory. Then run `claude mcp list`. The output should show +`MCP_DOCKER` with a "connected" status: + +```console +$ claude mcp list +Checking MCP server health... + +MCP_DOCKER: docker mcp gateway run - ✓ Connected +``` + +Test the connection by submitting a prompt that invokes one of your installed +MCP servers: + +```console +$ claude "{{% param test_prompt %}}" +``` + +### Claude Desktop + +Restart Claude Desktop and check the **Search and tools** menu in the chat +input. You should see the `MCP_DOCKER` server listed and enabled: + +![Claude Desktop](images/claude-desktop.avif) + +Test the connection by submitting a prompt that invokes one of your installed +MCP servers: + +```plaintext +{{% param test_prompt %}} +``` + +### Codex + +Run `codex mcp list` to view active MCP servers and their statuses. The +`MCP_DOCKER` server should appear in the list with an "enabled" status: + +```console +$ codex mcp list +Name Command Args Env Cwd Status Auth +MCP_DOCKER docker mcp gateway run - - enabled Unsupported +``` + +Test the connection by submitting a prompt that invokes one of your installed +MCP servers: + +```console +$ codex "{{% param test_prompt %}}" +``` + +### Continue + +Launch the Continue terminal UI by running `cn`. Use the `/mcp` command to view +active MCP servers and their statuses. 
The `MCP_DOCKER` server should appear in +the list with a "connected" status: + +```plaintext + MCP Servers + + ➤ 🟢 MCP_DOCKER (🔧75 📝3) + 🔄 Restart all servers + ⏹️ Stop all servers + 🔍 Explore MCP Servers + Back + + ↑/↓ to navigate, Enter to select, Esc to go back +``` + +Test the connection by submitting a prompt that invokes one of your installed +MCP servers: + +```console +$ cn "{{% param test_prompt %}}" +``` + +### Cursor + +Open Cursor. If you configured the MCP Toolkit for a specific project, open the +relevant project directory. Then navigate to **Cursor Settings > Tools & MCP**. +You should see `MCP_DOCKER` under **Installed MCP Servers**: + +![Cursor](images/cursor.avif) + +Test the connection by submitting a prompt that invokes one of your installed +MCP servers: + +```plaintext +{{% param test_prompt %}} +``` + +### Gemini + +Run `gemini mcp list` to view active MCP servers and their statuses. The +`MCP_DOCKER` should appear in the list with a "connected" status. + +```console +$ gemini mcp list +Configured MCP servers: + +✓ MCP_DOCKER: docker mcp gateway run (stdio) - Connected +``` + +Test the connection by submitting a prompt that invokes one of your installed +MCP servers: + +```console +$ gemini "{{% param test_prompt %}}" +``` + +### Goose + +{{< tabs >}} +{{< tab name="Desktop app" >}} + +Open the Goose desktop application and select **Extensions** in the sidebar. +Under **Enabled Extensions**, you should see an extension named `Mcpdocker`: + +![Goose desktop app](images/goose.avif) + +{{< /tab >}} +{{< tab name="CLI" >}} + +Run `goose info -v` and look for an entry named `mcpdocker` under extensions. +The status should show `enabled: true`: + +```console +$ goose info -v +… + mcpdocker: + args: + - mcp + - gateway + - run + available_tools: [] + bundled: null + cmd: docker + description: The Docker MCP Toolkit allows for easy configuration and consumption of MCP servers from the Docker MCP Catalog + enabled: true + env_keys: [] + envs: {} + name: mcpdocker + timeout: 300 + type: stdio +``` + +{{< /tab >}} +{{< /tabs >}} + +Test the connection by submitting a prompt that invokes one of your installed +MCP servers: + +```plaintext +{{% param "test_prompt" %}} +``` + +### LM Studio + +Restart LM Studio and start a new chat. Open the integrations menu and look for +an entry named `mcp/mcp-docker`. Use the toggle to enable the server: + +![LM Studio](images/lm-studio.avif) + +Test the connection by submitting a prompt that invokes one of your installed +MCP servers: + +```plaintext +{{% param "test_prompt" %}} +``` + +### OpenCode + +The OpenCode configuration file (at `~/.config/opencode/opencode.json` by +default) contains the setup for MCP Toolkit: + +```json +{ + "mcp": { + "MCP_DOCKER": { + "type": "local", + "command": ["docker", "mcp", "gateway", "run"], + "enabled": true + } + }, + "$schema": "https://opencode.ai/config.json" +} +``` + +Test the connection by submitting a prompt that invokes one of your installed +MCP servers: + +```console +$ opencode "{{% param "test_prompt" %}}" +``` + +### Sema4.ai Studio {#sema4} + +In Sema4.ai Studio, select **Actions** in the sidebar, then select the **MCP +Servers** tab. You should see Docker MCP Toolkit in the list: + +![Docker MCP Toolkit in Sema4.ai Studio](./images/sema4-mcp-list.avif) + +To use MCP Toolkit with Sema4.ai, add it as an agent action. Find the agent you +want to connect to the MCP Toolkit and open the agent editor. 
Select **Add +Action**, enable Docker MCP Toolkit in the list, then save your agent: + +![Editing an agent in Sema4.ai Studio](images/sema4-edit-agent.avif) + +Test the connection by submitting a prompt that invokes one of your installed +MCP servers: + +```plaintext +{{% param test_prompt %}} +``` + +### Visual Studio Code {#vscode} + +Open Visual Studio Code. If you configured the MCP Toolkit for a specific +project, open the relevant project directory. Then open the **Extensions** +pane. You should see the `MCP_DOCKER` server listed under installed MCP +servers. + +![MCP_DOCKER installed in Visual Studio Code](images/vscode-extensions.avif) + +Test the connection by submitting a prompt that invokes one of your installed +MCP servers: + +```plaintext +{{% param test_prompt %}} +``` + +### Zed + +Launch Zed and open agent settings: + +![Opening Zed agent settings from command palette](images/zed-cmd-palette.avif) + +Ensure that `MCP_DOCKER` is listed and enabled in the MCP Servers section: + +![MCP_DOCKER in Zed's agent settings](images/zed-agent-settings.avif) + +Test the connection by submitting a prompt that invokes one of your installed +MCP servers: + +```plaintext +{{% param test_prompt %}} +``` + +## Further reading + +- [MCP Profiles](/manuals/ai/mcp-catalog-and-toolkit/profiles.md) +- [MCP Toolkit](/manuals/ai/mcp-catalog-and-toolkit/toolkit.md) +- [MCP Catalog](/manuals/ai/mcp-catalog-and-toolkit/catalog.md) +- [MCP Gateway](/manuals/ai/mcp-catalog-and-toolkit/mcp-gateway.md) diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/hub-mcp.md b/content/manuals/ai/mcp-catalog-and-toolkit/hub-mcp.md new file mode 100644 index 00000000000..04c86f26242 --- /dev/null +++ b/content/manuals/ai/mcp-catalog-and-toolkit/hub-mcp.md @@ -0,0 +1,261 @@ +--- +title: Docker Hub MCP server +linkTitle: Hub MCP server +description: The Docker Hub MCP Server makes Docker Hub image metadata accessible to LLMs for content discovery. +keywords: Docker Hub MCP Server, Hub MCP server, Hub MCP +weight: 60 +--- + +The Docker Hub MCP Server is a Model Context Protocol (MCP) server that +interfaces with Docker Hub APIs to make rich image metadata accessible to LLMs, +enabling intelligent content discovery and repository management. + +For more information about MCP concepts and how MCP servers work, see the [Docker MCP Catalog and Toolkit](index.md) overview page. + +## Key features + +- Advanced LLM context: Docker's MCP Server provides LLMs with detailed, structured context for Docker Hub images, enabling smarter, more relevant recommendations for developers, whether they're choosing a base image or automating CI/CD workflows. +- Natural language image discovery: Developers can find the right container image using natural language, no need to remember tags or repository names. Just describe what you need, and Docker Hub will return images that match your intent. +- Simplified repository management: Hub MCP Server enables agents to manage repositories through natural language fetching image details, viewing stats, searching content, and performing key operations quickly and easily. + +## Install Docker Hub MCP server + +1. From the **MCP Toolkit** menu, select the **Catalog** tab and search for **Docker Hub** and select the plus icon to add the Docker Hub MCP server. +1. In the server's **Configuration** tab, insert your Docker Hub username and personal access token (PAT). +1. In the **Clients** tab in MCP Toolkit, ensure Gordon is connected. +1. 
From the **Gordon** menu, you can now send requests related to your
+   Docker Hub account, according to the tools provided by the Docker Hub MCP
+   server. To test it, ask Gordon:
+
+   ```text
+   What repositories are in my namespace?
+   ```
+
+> [!TIP]
+> By default, the Gordon [client](/manuals/ai/mcp-catalog-and-toolkit/toolkit.md#install-an-mcp-client) is enabled,
+> which means Gordon can automatically interact with your MCP servers.
+
+## Use Claude Desktop as a client
+
+1. Add the Docker Hub MCP Server configuration to your `claude_desktop_config.json`:
+
+   {{< tabs >}}
+   {{< tab name="For public repositories only">}}
+
+   ```json
+   {
+     "mcpServers": {
+       "docker-hub": {
+         "command": "node",
+         "args": ["/FULL/PATH/TO/YOUR/docker-hub-mcp-server/dist/index.js", "--transport=stdio"]
+       }
+     }
+   }
+   ```
+
+   Where:
+   - `/FULL/PATH/TO/YOUR/docker-hub-mcp-server` is the complete path to where you cloned the repository.
+
+   {{< /tab >}}
+   {{< tab name="For authenticated access">}}
+
+   ```json
+   {
+     "mcpServers": {
+       "docker-hub": {
+         "command": "node",
+         "args": ["/FULL/PATH/TO/YOUR/docker-hub-mcp-server/dist/index.js", "--transport=stdio", "--username=YOUR_DOCKER_HUB_USERNAME"],
+         "env": {
+           "HUB_PAT_TOKEN": "YOUR_DOCKER_HUB_PERSONAL_ACCESS_TOKEN"
+         }
+       }
+     }
+   }
+   ```
+
+   Where:
+   - `YOUR_DOCKER_HUB_USERNAME` is your Docker Hub username.
+   - `YOUR_DOCKER_HUB_PERSONAL_ACCESS_TOKEN` is your Docker Hub personal access token.
+   - `/FULL/PATH/TO/YOUR/docker-hub-mcp-server` is the complete path to where you cloned the repository.
+
+   {{< /tab >}}
+   {{< /tabs >}}
+
+1. Save the configuration file and completely restart Claude Desktop for the changes to take effect.
+
+## Use Visual Studio Code as a client
+
+1. Add the Docker Hub MCP Server configuration to your User Settings (JSON)
+   file in Visual Studio Code. You can do this by opening the `Command Palette` and
+   typing `Preferences: Open User Settings (JSON)`.
+
+   {{< tabs >}}
+   {{< tab name="For public repositories only">}}
+
+   ```json
+   {
+     "mcpServers": {
+       "docker-hub": {
+         "command": "node",
+         "args": ["/FULL/PATH/TO/YOUR/docker-hub-mcp-server/dist/index.js", "--transport=stdio"]
+       }
+     }
+   }
+   ```
+
+   Where:
+   - `/FULL/PATH/TO/YOUR/docker-hub-mcp-server` is the complete path to where you cloned the repository.
+
+   {{< /tab >}}
+   {{< tab name="For authenticated access">}}
+
+   ```json
+   {
+     "mcpServers": {
+       "docker-hub": {
+         "command": "node",
+         "args": ["/FULL/PATH/TO/YOUR/docker-hub-mcp-server/dist/index.js", "--transport=stdio"],
+         "env": {
+           "HUB_USERNAME": "YOUR_DOCKER_HUB_USERNAME",
+           "HUB_PAT_TOKEN": "YOUR_DOCKER_HUB_PERSONAL_ACCESS_TOKEN"
+         }
+       }
+     }
+   }
+   ```
+
+   Where:
+   - `YOUR_DOCKER_HUB_USERNAME` is your Docker Hub username.
+   - `YOUR_DOCKER_HUB_PERSONAL_ACCESS_TOKEN` is your Docker Hub personal access token.
+   - `/FULL/PATH/TO/YOUR/docker-hub-mcp-server` is the complete path to where you cloned the repository.
+
+   {{< /tab >}}
+   {{< /tabs >}}
+
+1. Open the `Command Palette` and type `MCP: List Servers`.
+1. Select `docker-hub` and select `Start Server`.
+
+## Using other clients
+
+To integrate the Docker Hub MCP Server into your own development
+environment, see the source code and installation instructions on the
+[`hub-mcp` GitHub repository](https://github.com/docker/hub-mcp).
+
+## Usage examples
+
+This section provides task-oriented examples for common operations with Docker Hub
+tools.
+
+### Finding images
+
+```console
+# Search for official images
+$ docker ai "Search for official nginx images on Docker Hub"
+
+# Search for lightweight images to reduce deployment size and improve performance
+$ docker ai "Search for minimal Node.js images with small footprint"
+
+# Get the most recent tag of a base image
+$ docker ai "Show me the latest tag details for go"
+
+# Find a production-ready database with enterprise features and reliability
+$ docker ai "Search for production ready database images"
+
+# Compare Ubuntu versions to choose the right one for my project
+$ docker ai "Help me find the right Ubuntu version for my project"
+```
+
+### Repository management
+
+```console
+# Create a repository
+$ docker ai "Create a repository in my namespace"
+
+# List all repositories in my namespace
+$ docker ai "List all repositories in my namespace"
+
+# Find the largest repository in my namespace
+$ docker ai "Which of my repositories takes up the most space?"
+
+# Find repositories that haven't been updated recently
+$ docker ai "Which of my repositories haven't had any pushes in the last 60 days?"
+
+# Find which repositories are currently active and being used
+$ docker ai "Show me my most recently updated repositories"
+
+# Get details about a repository
+$ docker ai "Show me information about my '' repository"
+```
+
+### Pull/push images
+
+```console
+# Pull latest PostgreSQL version
+$ docker ai "Pull the latest postgres image"
+
+# Push image to your Docker Hub repository
+$ docker ai "Push my to my repository"
+```
+
+### Tag management
+
+```console
+# List all tags for a repository
+$ docker ai "Show me all tags for my '' repository"
+
+# Find the most recently pushed tag
+$ docker ai "What's the most recent tag pushed to my '' repository?"
+
+# List tags with architecture filtering
+$ docker ai "List tags for in the '' repository that support amd64 architecture"
+
+# Get detailed information about a specific tag
+$ docker ai "Show me details about the '' tag in the '' repository"
+
+# Check if a specific tag exists
+$ docker ai "Check if version 'v1.2.0' exists for my 'my-web-app' repository"
+```
+
+### Docker Hardened Images
+
+```console
+# List available hardened images
+$ docker ai "What is the most secure image I can use to run a node.js application?"
+
+# Convert Dockerfile to use a hardened image
+$ docker ai "Can you help me update my Dockerfile to use a Docker Hardened Image instead of the current one?"
+```
+
+> [!NOTE]
+> To access Docker Hardened Images, a subscription is required. If you're interested in using Docker Hardened Images, visit [Docker Hardened Images](https://www.docker.com/products/hardened-images/).
+
+## Reference
+
+This section provides a comprehensive listing of the tools you can find
+in the Docker Hub MCP Server.
+
+### Docker Hub MCP server tools
+
+Tools to interact with your Docker repositories and discover content on Docker Hub.
+ +| Name | Description | +|------|-------------| +| `check-repository` | Check repository | +| `check-repository-tag` | Check repository tag | +| `check-repository-tags` | Check repository tags | +| `create-repository` | Creates a new repository | +| `docker-hardened-images` | Lists available [Docker Hardened Images](https://www.docker.com/products/hardened-images/) in specified namespace | +| `get-namespaces` | Get organizations/namespaces for a user | +| `get-repository-dockerfile` | Gets Dockerfile for repository | +| `get-repository-info` | Gets repository info | +| `list-repositories-by-namespace` | Lists repositories under namespace | +| `list-repository-tags` | List repository tags | +| `read-repository-tag` | Read repository tag | +| `search` | Search content on Docker Hub | +| `set-repository-dockerfile` | Sets Dockerfile for repository | +| `update-repository-info` | Updates repository info | diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/ask-gordon.avif b/content/manuals/ai/mcp-catalog-and-toolkit/images/ask-gordon.avif new file mode 100644 index 00000000000..9bd35d4f3f7 Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/ask-gordon.avif differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/claude-desktop.avif b/content/manuals/ai/mcp-catalog-and-toolkit/images/claude-desktop.avif new file mode 100644 index 00000000000..e574f487409 Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/claude-desktop.avif differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/copilot-mode.png b/content/manuals/ai/mcp-catalog-and-toolkit/images/copilot-mode.png new file mode 100644 index 00000000000..9ce6e961c5c Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/copilot-mode.png differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/cursor.avif b/content/manuals/ai/mcp-catalog-and-toolkit/images/cursor.avif new file mode 100644 index 00000000000..f092705f87b Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/cursor.avif differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/goose.avif b/content/manuals/ai/mcp-catalog-and-toolkit/images/goose.avif new file mode 100644 index 00000000000..3753df01219 Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/goose.avif differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/lm-studio.avif b/content/manuals/ai/mcp-catalog-and-toolkit/images/lm-studio.avif new file mode 100644 index 00000000000..b317ee569da Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/lm-studio.avif differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/mcp_toolkit.avif b/content/manuals/ai/mcp-catalog-and-toolkit/images/mcp_toolkit.avif new file mode 100644 index 00000000000..2b33fe67314 Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/mcp_toolkit.avif differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/sema4-edit-agent.avif b/content/manuals/ai/mcp-catalog-and-toolkit/images/sema4-edit-agent.avif new file mode 100644 index 00000000000..b143bee1267 Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/sema4-edit-agent.avif differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/sema4-mcp-list.avif b/content/manuals/ai/mcp-catalog-and-toolkit/images/sema4-mcp-list.avif new file mode 100644 index 00000000000..92f7a0617e5 Binary files 
/dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/sema4-mcp-list.avif differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/tools.png b/content/manuals/ai/mcp-catalog-and-toolkit/images/tools.png new file mode 100644 index 00000000000..4439dc4b5e1 Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/tools.png differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/vscode-extensions.avif b/content/manuals/ai/mcp-catalog-and-toolkit/images/vscode-extensions.avif new file mode 100644 index 00000000000..9cd229474f2 Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/vscode-extensions.avif differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/zed-agent-settings.avif b/content/manuals/ai/mcp-catalog-and-toolkit/images/zed-agent-settings.avif new file mode 100644 index 00000000000..6df7e69785e Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/zed-agent-settings.avif differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/zed-cmd-palette.avif b/content/manuals/ai/mcp-catalog-and-toolkit/images/zed-cmd-palette.avif new file mode 100644 index 00000000000..540dc673106 Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/zed-cmd-palette.avif differ diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/mcp-gateway.md b/content/manuals/ai/mcp-catalog-and-toolkit/mcp-gateway.md new file mode 100644 index 00000000000..f88d16e2b75 --- /dev/null +++ b/content/manuals/ai/mcp-catalog-and-toolkit/mcp-gateway.md @@ -0,0 +1,94 @@ +--- +title: MCP Gateway +linkTitle: Gateway +description: "Docker's MCP Gateway provides secure, centralized, and scalable orchestration of AI tools through containerized MCP servers, empowering developers, operators, and security teams." +keywords: MCP Gateway +weight: 40 +aliases: + - /ai/mcp-gateway/ +--- + +The MCP Gateway is Docker's open source solution for orchestrating Model +Context Protocol (MCP) servers. It acts as a centralized proxy between clients +and servers, managing configuration, credentials, and access control. + +When using MCP servers without the MCP Gateway, you need to configure +applications individually for each AI application. With the MCP Gateway, you +configure applications to connect to the Gateway. The Gateway then handles +server lifecycle, routing, and authentication across all servers in your +[profiles](/manuals/ai/mcp-catalog-and-toolkit/profiles.md). + +> [!NOTE] +> If you use Docker Desktop with MCP Toolkit enabled, the Gateway runs +> automatically in the background. You don't need to start or configure it +> manually. This documentation is for users who want to understand how the +> Gateway works or run it directly for advanced use cases. + +> [!TIP] +> E2B sandboxes now include direct access to the Docker MCP Catalog, giving developers +> access to over 200 tools and services to seamlessly build and run AI agents. For +> more information, see [E2B Sandboxes](sandboxes.md). + +## How it works + +MCP Gateway runs MCP servers in isolated Docker containers with restricted +privileges, network access, and resource usage. It includes built-in logging +and call-tracing capabilities to ensure full visibility and governance of AI +tool activity. + +The MCP Gateway manages the server's entire lifecycle. When an AI application +needs to use a tool, it sends a request to the Gateway. 
The Gateway identifies +which server handles that tool and, if the server isn't already running, starts +it as a Docker container. The Gateway then injects any required credentials, +applies security restrictions, and forwards the request to the server. The +server processes the request and returns the result through the Gateway back to +the AI application. + +The MCP Gateway solves a fundamental problem: MCP servers are just programs +that need to run somewhere. Running them directly on your machine means dealing +with installation, dependencies, updates, and security risks. By running them +as containers managed by the Gateway, you get isolation, consistent +environments, and centralized control. + +The Gateway works with profiles to determine which servers are available. When +you run the Gateway, you specify which profile to use with the `--profile` flag +to determine which servers are made available to clients. + +## Usage + +To use the MCP Gateway, you'll need Docker Desktop with MCP Toolkit enabled. +Follow the [MCP Toolkit guide](toolkit.md) to enable and configure servers +through the Docker Desktop interface, or see +[Use MCP Toolkit from the CLI](cli.md) for terminal-based workflows. + +### Install the MCP Gateway manually + +For Docker Engine without Docker Desktop, you'll need to download and install +the MCP Gateway separately before you can run it. + +1. Download the latest binary from the [GitHub releases page](https://github.com/docker/mcp-gateway/releases/latest). + +2. Move or symlink the binary to the destination matching your OS: + + | OS | Binary destination | + | ------- | ----------------------------------- | + | Linux | `~/.docker/cli-plugins/docker-mcp` | + | macOS | `~/.docker/cli-plugins/docker-mcp` | + | Windows | `%USERPROFILE%\.docker\cli-plugins` | + +3. Make the binaries executable: + + ```bash + $ chmod +x ~/.docker/cli-plugins/docker-mcp + ``` + +You can now use the `docker mcp` command: + +```bash +docker mcp --help +``` + +## Additional information + +For more details on how the MCP Gateway works and available customization +options, see the complete documentation [on GitHub](https://github.com/docker/mcp-gateway). diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/profiles.md b/content/manuals/ai/mcp-catalog-and-toolkit/profiles.md new file mode 100644 index 00000000000..690db777547 --- /dev/null +++ b/content/manuals/ai/mcp-catalog-and-toolkit/profiles.md @@ -0,0 +1,262 @@ +--- +title: MCP Profiles +linkTitle: Profiles +description: Organize MCP servers into profiles for different projects and environments +keywords: Docker MCP, profiles, MCP servers, configuration, sharing +weight: 25 +--- + +{{< summary-bar feature_name="MCP Profiles" >}} + +Profiles organize your MCP servers into named collections. Without profiles, +you'd configure servers separately for every AI application you use. Each time +you want to change which servers are available, you'd update Claude Desktop, VS +Code, Cursor, and other tools individually. Profiles solve this by centralizing +your server configurations. + +## What profiles do + +A profile is a named collection of MCP servers with their configurations and +settings. You select servers from the [MCP +Catalog](/manuals/ai/mcp-catalog-and-toolkit/catalog.md) (the source of +available servers) and add them to your profiles (your configured server +collections for specific work). Think of the catalog as a library of tools, and +profiles as your toolboxes organized for different jobs. 
+ +Your "web-dev" profile might include GitHub, Playwright, and database servers. +Your "data-analysis" profile might include spreadsheet, API, and visualization +servers. Connect different AI clients to different profiles, or switch between +profiles as you change tasks. + +When you run the MCP Gateway or connect a client without specifying a profile, +Docker MCP uses your default profile. If you're upgrading from a previous +version of MCP Toolkit, your existing server configurations are already in the +default profile. + +## Profile capabilities + +Each profile maintains its own isolated collection of servers and +configurations. Your "web-dev" profile might include GitHub, Playwright, and +database servers, while your "data-analysis" profile includes spreadsheet, API, +and visualization servers. Create as many profiles as you need, each containing +only the servers relevant to that context. + +You can connect different AI applications to different profiles. When you +connect a client, you specify which profile it should use. This means Claude +Desktop and VS Code can have access to different server collections if needed. + +Profiles can be shared with your team. Push a profile to your registry, and +team members can pull it to get the exact same server collection and +configuration you use. + +## Creating and managing profiles + +### Create a profile + +1. In Docker Desktop, select **MCP Toolkit** and select the **Profiles** tab. +2. Select **Create profile**. +3. Enter a name for your profile (e.g., "web-dev"). +4. Optionally, search and add servers to your profile now, or add them later. +5. Optionally, search and add clients to connect to your profile. +6. Select **Create**. + +Your new profile appears in the profiles list. + +### View profile details + +Select a profile in the **Profiles** tab to view its details. The profile view +has two tabs: + +- **Overview**: Shows the servers in your profile, secrets configuration, and + connected clients. Use the **+** buttons to add more servers or clients. +- **Tools**: Lists all available tools from your profile's servers. You can + enable or disable individual tools. + +### Remove a profile + +1. In the **Profiles** tab, find the profile you want to remove. +2. Select ⋮ next to the profile name, and then **Delete**. +3. Confirm the removal. + +> [!CAUTION] +> Removing a profile deletes all its server configurations and settings, and +> updates the client configuration (removes MCP Toolkit). This action can't be +> undone. + +### Default profile + +When you run the MCP Gateway or use MCP Toolkit without specifying a profile, +Docker MCP uses a profile named `default`, or an empty configuration if a +`default` profile does not exist. + +If you're upgrading from a previous version of MCP Toolkit, your existing +server configurations automatically migrate to the `default` profile. You don't +need to manually recreate your setup - everything continues to work as before. + +You can always specify a different profile using the `--profile` flag with the +gateway command: + +```console +$ docker mcp gateway run --profile web-dev +``` + +## Adding servers to profiles + +Profiles contain the MCP servers you select from the catalog. Add servers to +organize your tools for specific workflows. + +### Add a server + +You can add servers to a profile in two ways. + +From the Catalog tab: + +1. Select the **Catalog** tab. +2. Select the checkbox next to servers you want to add to see which profile to + add them to. +3. 
Choose your profile from the drop-down. + +From within a profile: + +1. Select the **Profiles** tab and select your profile. +2. In the **Servers** section, select the **+** button. +3. Search for and select servers to add. + +If a server requires OAuth authentication, you're prompted to authorize it. See +[OAuth authentication](/manuals/ai/mcp-catalog-and-toolkit/toolkit.md#oauth-authentication) +for details. + +### List servers in a profile + +Select a profile in the **Profiles** tab to see all servers it contains. + +### Remove a server + +1. Select the **Profiles** tab and select your profile. +2. In the **Servers** section, find the server you want to remove. +3. Select the delete icon next to the server. + +## Configuring profiles + +### Server configuration + +Some servers require configuration beyond authentication. Configure server +settings within your profile. + +1. Select the **Profiles** tab and select your profile. +2. In the **Servers** section, select the configure icon next to the server. +3. Adjust the server's configuration settings as needed. + +### OAuth credentials + +OAuth credentials are shared across all profiles. When you authorize access to +a service like GitHub or Notion, that authorization is available to any server +in any profile that needs it. + +This means all profiles use the same OAuth credentials for a given service. If +you need to use different accounts for different projects, you'll need to +revoke and re-authorize between switching profiles. + +See [OAuth authentication](/manuals/ai/mcp-catalog-and-toolkit/toolkit.md#oauth-authentication) +for details on authorizing servers. + +### Configuration persistence + +Profile configurations persist in your Docker installation. When you restart +Docker Desktop or your system, your profiles, servers, and configurations +remain intact. + +## Sharing profiles + +Profiles can be shared with your team by pushing them to OCI-compliant +registries as artifacts. This is useful for distributing standardized MCP +setups across your organization. Credentials are not included in shared +profiles for security reasons. Team members configure OAuth separately after +pulling. + +### Push a profile + +1. Select the profile you want to share in the **Profiles** tab. +2. Select **Push to Registry**. +3. Enter the registry destination (e.g., `registry.example.com/profiles/web-dev:v1`). +4. Complete authentication if required. + +### Pull a profile + +1. Select **Pull from Registry** in the **Profiles** tab. +2. Enter the registry reference (e.g., `registry.example.com/profiles/team-standard:latest`). +3. Complete authentication if required. + +The profile is downloaded and added to your profiles list. Configure any +required OAuth credentials separately. + +### Team collaboration workflow + +A typical workflow for sharing profiles across a team: + +1. Create and configure a profile with the servers your team needs. +2. Test the profile to ensure it works as expected. +3. Push the profile to your team's registry with a version tag (e.g., + `registry.example.com/profiles/team-dev:v1`). +4. Share the registry reference with your team. +5. Team members pull the profile and configure any required OAuth credentials. + +This ensures everyone uses the same server collection and configuration, +reducing setup time and inconsistencies. + +## Using profiles with clients + +When you connect an AI client to the MCP Gateway, you specify which profile's +servers the client can access. 
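+
+For example, instead of editing a client's JSON configuration by hand, you can
+also wire a client to a profile from the command line. The following is a
+sketch based on the `docker mcp client connect` command shown in the Toolkit
+documentation; the client name (`vscode`) and profile name (`web-dev`) are
+illustrative:
+
+```console
+# Connect VS Code in the current project to the "web-dev" profile
+$ docker mcp client connect vscode --profile web-dev
+```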
+ +### Run the gateway with a profile + +Connect clients to your profile through the **Clients** section in the MCP +Toolkit. You can add clients when creating a profile or add them to existing +profiles later. + +### Configure clients for specific profiles + +When setting up a client manually, you can specify which profile the client +uses. This lets different clients connect to different profiles. + +For example, your Claude Desktop configuration might use: + +```json +{ + "mcpServers": { + "MCP_DOCKER": { + "command": "docker", + "args": ["mcp", "gateway", "run", "--profile", "claude-work"] + } + } +} +``` + +While your VS Code configuration uses a different profile: + +```json +{ + "mcp": { + "servers": { + "MCP_DOCKER": { + "command": "docker", + "args": ["mcp", "gateway", "run", "--profile", "vscode-dev"], + "type": "stdio" + } + } + } +} +``` + +### Switching between profiles + +To switch the profile your clients use, update the client configuration to +specify a different `--profile` value in the gateway command arguments. + +## Further reading + +- [Get started with MCP Toolkit](/manuals/ai/mcp-catalog-and-toolkit/get-started.md) +- [Use MCP Toolkit from the CLI](/manuals/ai/mcp-catalog-and-toolkit/cli.md) +- [MCP Catalog](/manuals/ai/mcp-catalog-and-toolkit/catalog.md) +- [MCP Toolkit](/manuals/ai/mcp-catalog-and-toolkit/toolkit.md) diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/toolkit.md b/content/manuals/ai/mcp-catalog-and-toolkit/toolkit.md index 3af9971dbab..7a851c17eff 100644 --- a/content/manuals/ai/mcp-catalog-and-toolkit/toolkit.md +++ b/content/manuals/ai/mcp-catalog-and-toolkit/toolkit.md @@ -1,53 +1,199 @@ --- -title: MCP Toolkit -description: -keywords: +title: Docker MCP Toolkit +linkTitle: Toolkit UI +description: Use the MCP Toolkit to set up MCP servers and MCP clients. +keywords: Docker MCP Toolkit, MCP server, MCP client, AI agents +weight: 30 +aliases: + - /desktop/features/gordon/mcp/gordon-mcp-server/ + - /ai/gordon/mcp/gordon-mcp-server/ --- -The Docker MCP Toolkit is a Docker Desktop extension local that enables seamless setup, management, and execution of containerized MCP servers and their connections to AI agents. It removes the friction from tool usage by offering secure defaults, one-click setup, and support for a growing ecosystem of LLM-based clients. It is the fastest path from MCP tool discovery to local execution. +{{< summary-bar feature_name="Docker MCP Toolkit" >}} + +> [!NOTE] +> This page describes the MCP Toolkit interface in Docker Desktop 4.62 and +> later. Earlier versions have a different UI. Upgrade to follow these +> instructions exactly. + +The Docker MCP Toolkit is a management interface integrated into Docker Desktop +that lets you set up, manage, and run containerized MCP servers in profiles and +connect them to AI agents. It removes friction from tool usage by offering +secure defaults, easy setup, and support for a growing ecosystem of LLM-based +clients. It is the fastest way from MCP tool discovery to local execution. ## Key features -- Cross-LLM compatibility: Works out of the box with Claude Desktop, Cursor, Continue.dev, and [Gordon](/manuals/ai/gordon/_index.md). -- Integrated tool discovery: Browse and launch MCP servers that are available in the Docker MCP Catalog, directly from Docker Desktop. -- No manual setup: Skip dependency management, runtime setup, and manual server configuration. +- Cross-LLM compatibility: Works with Claude, Cursor, and other MCP clients. 
+- Integrated tool discovery: Browse and launch MCP servers from the Docker MCP Catalog directly in Docker Desktop. +- Zero manual setup: No dependency management, runtime configuration, or setup required. +- Profile-based organization: Create separate server collections for different projects or environments. +- Organizes MCP servers into profiles, acting as a gateway for clients to access the servers in each profile. -## How it works +> [!TIP] +> The MCP Toolkit includes [Dynamic MCP](/manuals/ai/mcp-catalog-and-toolkit/dynamic-mcp.md), +> which enables AI agents to discover, add, and compose MCP servers on-demand during +> conversations, without manual configuration. Your agent can search the catalog and +> add tools as needed when you connect to the gateway. -The **MCP Servers** tab lists all available servers from the Docker MCP Catalog. Each entry includes: +## How the MCP Toolkit works -- Tool name and description -- Partner/publisher -- Number of callable tools and what they are +MCP introduces two core concepts: MCP clients and MCP servers. -To enable an MCP server, simply use the toggle switch to toggle it on. +- MCP clients are typically embedded in LLM-based applications, such as the + Claude Desktop app. They request resources or actions. +- MCP servers are launched by the client to perform the requested tasks, using + any necessary tools, languages, or processes. -> [!NOTE] -> -> Some MCP servers requires secrets or tokens to be configured before it can be enabled. Instructions on how to do this can be found on each MCP servers' repository. +Docker standardizes the development, packaging, and distribution of +applications, including MCP servers. By packaging MCP servers as containers, +Docker eliminates issues related to isolation and environment differences. You +can run a container directly, without managing dependencies or configuring +runtimes. + +Depending on the MCP server, the tools it provides might run within the same +container as the server or in dedicated containers for better isolation. + +The MCP Toolkit organizes servers into profiles: named collections of servers +with their configurations. This lets you maintain different server setups for +different projects or environments. When you connect a client, you specify +which profile it should use. + +## Security + +The Docker MCP Toolkit combines passive and active measures to reduce attack +surfaces and ensure safe runtime behavior. + +### Passive security + +Passive security refers to measures implemented at build-time, when the MCP +server code is packaged into a Docker image. + +- Image signing and attestation: All MCP server images under `mcp/` in the [MCP + Catalog](catalog.md) are built by Docker and digitally signed to verify their + source and integrity. Each image includes a Software Bill of Materials (SBOM) + for full transparency. + +### Active security + +Active security refers to security measures at runtime, before and after tools +are invoked, enforced through resource and access limitations. + +- CPU allocation: MCP tools are run in their own container. They are + restricted to 1 CPU, limiting the impact of potential misuse of computing + resources. + +- Memory allocation: Containers for MCP tools are limited to 2 GB. + +- Filesystem access: By default, MCP Servers have no access to the host filesystem. + The user explicitly selects the servers that will be granted file mounts. 
+ +- Interception of tool requests: Requests to and from tools that contain sensitive + information such as secrets are blocked. -The **MCP Clients** tab lets you connect your enabled MCP servers to supported agents. Connection is as simple as selecting **Connect**, so you can switch between LLM providers without altering your MCP server integrations or security configurations. +### OAuth authentication -## Installation +Some MCP servers require authentication to access external services like +GitHub, Notion, and Linear. The MCP Toolkit handles OAuth authentication +automatically. You authorize access through your browser, and the Toolkit +manages credentials securely. You don't need to manually create API tokens or +configure authentication for each service. -To install the Docker MCP Toolkit extension: +#### Authorize a server with OAuth -1. In the Docker Desktop Dashboard, select the **Extensions** view, and then select **Manage**. -2. Select the **Browse** tab and search for **Docker MCP Toolkit**. -3. On the **Docker MCP Toolkit** result, select install. +1. In Docker Desktop, go to **MCP Toolkit** and select the **Catalog** tab. +2. Find and add an MCP server that requires OAuth. +3. In the server's **Configuration** tab, select the **OAuth** authentication + method. Follow the link to begin the OAuth authorization. +4. Your browser opens the authorization page for the service. Follow the + on-screen instructions to complete authentication. +5. Return to Docker Desktop when authentication is complete. -The extension then appears under the **My extensions** tab. +View all authorized services in the **OAuth** tab. To revoke access, select +**Revoke** next to the service you want to disconnect. -### Example +## Usage examples -The following example assumes you have already installed and set up Claude Desktop. +### Example: Use Claude Desktop as a client -1. In the Docker MCP Toolkit extension, search for the Puppeteer MCP server in the **MCP Servers** tab, and toggle it on to enable. -2. From the **MCP Clients** tab, select the **Connect** button for Claude Desktop. -3. Within Claude Desktop, submit the following prompt using the Sonnet 3.5 model: +Imagine you have Claude Desktop installed, and you want to use the GitHub MCP +server and the Puppeteer MCP server. You do not have to install the servers in +Claude Desktop. You can add these 2 MCP servers to your profile in the MCP +Toolkit and connect Claude Desktop as a client: + +1. From the **MCP Toolkit** menu, select the **Catalog** tab and find the **Puppeteer** server and add it to your profile. +1. Repeat for the **GitHub Official** server. +1. From the **Clients** tab, select **Connect** next to **Claude Desktop**. Restart + Claude Desktop if it's running, and it can now access all the servers in the MCP Toolkit. +1. Within Claude Desktop, run a test by submitting the following prompt using the Sonnet 3.5 model: ```text Take a screenshot of docs.docker.com and then invert the colors ``` -Once you've given your consent to use the new tools, Claude spins up the Puppeteer MCP server inside a container, navigates to the target URL, captures and modify the page, and returns the screenshot. \ No newline at end of file +### Example: Use Visual Studio Code as a client + +You can interact with all your installed MCP servers in Visual Studio Code: + +1. To enable the MCP Toolkit: + + {{< tabs group="" >}} + {{< tab name="Enable globally">}} + 1. 
Insert the following in your Visual Studio Code's User `mcp.json`: + + ```json + "mcp": { + "servers": { + "MCP_DOCKER": { + "command": "docker", + "args": [ + "mcp", + "gateway", + "run", + "--profile", + "my_profile" + ], + "type": "stdio" + } + } + } + ``` + + {{< /tab >}} + {{< tab name="Enable for a given project">}} + 1. In your terminal, navigate to your project's folder. + 1. Run: + + ```bash + docker mcp client connect vscode --profile my_profile + ``` + + > [!NOTE] + > This command creates a `.vscode/mcp.json` file in the current directory + > that connects VSCode to your profile. As this is a user-specific file, + > add it to your `.gitignore` file to prevent it from being committed to + > the repository. + > + > ```console + > echo ".vscode/mcp.json" >> .gitignore + > ``` + +{{< /tab >}} +{{}} + +1. In Visual Studio Code, open a new Chat and select the **Agent** mode: + + ![Copilot mode switching](./images/copilot-mode.png) + +1. You can also check the available MCP tools: + + ![Displaying tools in VSCode](./images/tools.png) + +For more information about the Agent mode, see the +[Visual Studio Code documentation](https://code.visualstudio.com/docs/copilot/chat/mcp-servers#_use-mcp-tools-in-agent-mode). + +## Further reading + +- [Use MCP Toolkit from the CLI](/manuals/ai/mcp-catalog-and-toolkit/cli.md) +- [MCP Catalog](/manuals/ai/mcp-catalog-and-toolkit/catalog.md) +- [MCP Gateway](/manuals/ai/mcp-catalog-and-toolkit/mcp-gateway.md) diff --git a/content/manuals/ai/model-runner.md b/content/manuals/ai/model-runner.md deleted file mode 100644 index 65fe8562285..00000000000 --- a/content/manuals/ai/model-runner.md +++ /dev/null @@ -1,382 +0,0 @@ ---- -title: Docker Model Runner -params: - sidebar: - badge: - color: blue - text: Beta - group: AI -weight: 20 -description: Learn how to use Docker Model Runner to manage and run AI models. -keywords: Docker, ai, model runner, docker deskotp, llm -aliases: - - /desktop/features/model-runner/ - - /ai/model-runner/ ---- - -{{< summary-bar feature_name="Docker Model Runner" >}} - -The Docker Model Runner plugin lets you: - -- [Pull models from Docker Hub](https://hub.docker.com/u/ai) -- Run AI models directly from the command line -- Manage local models (add, list, remove) -- Interact with models using a submitted prompt or in chat mode in the CLI or Docker Desktop Dashboard -- Push models to Docker Hub - -Models are pulled from Docker Hub the first time they're used and stored locally. They're loaded into memory only at runtime when a request is made, and unloaded when not in use to optimize resources. Since models can be large, the initial pull may take some time — but after that, they're cached locally for faster access. You can interact with the model using [OpenAI-compatible APIs](#what-api-endpoints-are-available). - -> [!TIP] -> -> Using Testcontainers or Docker Compose? [Testcontainers for Java](https://java.testcontainers.org/modules/docker_model_runner/) and [Go](https://golang.testcontainers.org/modules/dockermodelrunner/), and [Docker Compose](/manuals/compose/how-tos/model-runner.md) now support Docker Model Runner. - -## Enable Docker Model Runner - -1. Navigate to the **Features in development** tab in settings. -2. Under the **Experimental features** tab, select **Access experimental features**. -3. Select **Apply and restart**. -4. Quit and reopen Docker Desktop to ensure the changes take effect. -5. Open the **Settings** view in Docker Desktop. -6. Navigate to **Features in development**. -7. 
From the **Beta** tab, check the **Enable Docker Model Runner** setting. - -You can now use the `docker model` command in the CLI and view and interact with your local models in the **Models** tab in the Docker Desktop Dashboard. - -## Available commands - -### Model runner status - -Check whether the Docker Model Runner is active and displays the current inference engine: - -```console -$ docker model status -``` - -### View all commands - -Displays help information and a list of available subcommands. - -```console -$ docker model help -``` - -Output: - -```text -Usage: docker model COMMAND - -Commands: - list List models available locally - pull Download a model from Docker Hub - rm Remove a downloaded model - run Run a model interactively or with a prompt - status Check if the model runner is running - version Show the current version -``` - -### Pull a model - -Pulls a model from Docker Hub to your local environment. - -```console -$ docker model pull -``` - -Example: - -```console -$ docker model pull ai/smollm2 -``` - -Output: - -```text -Downloaded: 257.71 MB -Model ai/smollm2 pulled successfully -``` - -The models also display in the Docker Desktop Dashboard. - -### List available models - -Lists all models currently pulled to your local environment. - -```console -$ docker model list -``` - -You will see something similar to: - -```text -+MODEL PARAMETERS QUANTIZATION ARCHITECTURE MODEL ID CREATED SIZE -+ai/smollm2 361.82 M IQ2_XXS/Q4_K_M llama 354bf30d0aa3 3 days ago 256.35 MiB -``` - -### Run a model - -Run a model and interact with it using a submitted prompt or in chat mode. When you run a model, Docker -calls an Inference Server API endpoint hosted by the Model Runner through Docker Desktop. The model -stays in memory until another model is requested, or until a pre-defined inactivity timeout is reached (currently 5 minutes). - -You do not have to use `Docker model run` before interacting with a specific model from a -host process or from within a container. Model Runner transparently loads the requested model on-demand, assuming it has been -pulled beforehand and is locally available. - -#### One-time prompt - -```console -$ docker model run ai/smollm2 "Hi" -``` - -Output: - -```text -Hello! How can I assist you today? -``` - -#### Interactive chat - -```console -$ docker model run ai/smollm2 -``` - -Output: - -```text -Interactive chat mode started. Type '/bye' to exit. -> Hi -Hi there! It's SmolLM, AI assistant. How can I help you today? -> /bye -Chat session ended. -``` - -> [!TIP] -> -> You can also use chat mode in the Docker Desktop Dashboard when you select the model in the **Models** tab. - -### Push a model to Docker Hub - -To push your model to Docker Hub: - -```console -$ docker model push / -``` - -### Tag a model - -To specify a particular version or variant of the model: - -```console -$ docker model tag -``` - -If no tag is provided, Docker defaults to `latest`. - -### View the logs - -Fetch logs from Docker Model Runner to monitor activity or debug issues. - -```console -$ docker model logs -``` - -The following flags are accepted: - -- `-f`/`--follow`: View logs with real-time streaming -- `--no-engines`: Exclude inference engine logs from the output - -### Remove a model - -Removes a downloaded model from your system. 
- -```console -$ docker model rm -``` - -Output: - -```text -Model removed successfully -``` - -## Integrate the Docker Model Runner into your software development lifecycle - -You can now start building your Generative AI application powered by the Docker Model Runner. - -If you want to try an existing GenAI application, follow these instructions. - -1. Set up the sample app. Clone and run the following repository: - - ```console - $ git clone https://github.com/docker/hello-genai.git - ``` - -2. In your terminal, navigate to the `hello-genai` directory. - -3. Run `run.sh` for pulling the chosen model and run the app(s): - -4. Open you app in the browser at the addresses specified in the repository [README](https://github.com/docker/hello-genai). - -You'll see the GenAI app's interface where you can start typing your prompts. - -You can now interact with your own GenAI app, powered by a local model. Try a few prompts and notice how fast the responses are — all running on your machine with Docker. - -## FAQs - -### What models are available? - -All the available models are hosted in the [public Docker Hub namespace of `ai`](https://hub.docker.com/u/ai). - -### What API endpoints are available? - -Once the feature is enabled, new API endpoints are available under the following base URLs: - -- From containers: `http://model-runner.docker.internal/` -- From host processes: `http://localhost:12434/`, assuming you have enabled TCP host access on default port 12434. - -Docker Model management endpoints: - -```text -POST /models/create -GET /models -GET /models/{namespace}/{name} -DELETE /models/{namespace}/{name} -``` - -OpenAI endpoints: - -```text -GET /engines/llama.cpp/v1/models -GET /engines/llama.cpp/v1/models/{namespace}/{name} -POST /engines/llama.cpp/v1/chat/completions -POST /engines/llama.cpp/v1/completions -POST /engines/llama.cpp/v1/embeddings -``` - -To call these endpoints via a Unix socket (`/var/run/docker.sock`), prefix their path with -with `/exp/vDD4.40`. - -> [!NOTE] -> You can omit `llama.cpp` from the path. For example: `POST /engines/v1/chat/completions`. - - -### How do I interact through the OpenAI API? - -#### From within a container - -To call the `chat/completions` OpenAI endpoint from within another container using `curl`: - -```bash -#!/bin/sh - -curl http://model-runner.docker.internal/engines/llama.cpp/v1/chat/completions \ - -H "Content-Type: application/json" \ - -d '{ - "model": "ai/smollm2", - "messages": [ - { - "role": "system", - "content": "You are a helpful assistant." - }, - { - "role": "user", - "content": "Please write 500 words about the fall of Rome." - } - ] - }' - -``` - -#### From the host using TCP - -To call the `chat/completions` OpenAI endpoint from the host via TCP: - -1. Enable the host-side TCP support from the Docker Desktop GUI, or via the [Docker Desktop CLI](/manuals/desktop/features/desktop-cli.md). - For example: `docker desktop enable model-runner --tcp `. -2. Interact with it as documented in the previous section using `localhost` and the correct port. - -```bash -#!/bin/sh - - curl http://localhost:12434/engines/llama.cpp/v1/chat/completions \ - -H "Content-Type: application/json" \ - -d '{ - "model": "ai/smollm2", - "messages": [ - { - "role": "system", - "content": "You are a helpful assistant." - }, - { - "role": "user", - "content": "Please write 500 words about the fall of Rome." 
- } - ] - }' -``` - -#### From the host using a Unix socket - -To call the `chat/completions` OpenAI endpoint through the Docker socket from the host using `curl`: - -```bash -#!/bin/sh - -curl --unix-socket $HOME/.docker/run/docker.sock \ - localhost/exp/vDD4.40/engines/llama.cpp/v1/chat/completions \ - -H "Content-Type: application/json" \ - -d '{ - "model": "ai/smollm2", - "messages": [ - { - "role": "system", - "content": "You are a helpful assistant." - }, - { - "role": "user", - "content": "Please write 500 words about the fall of Rome." - } - ] - }' -``` - -## Known issues - -### `docker model` is not recognised - -If you run a Docker Model Runner command and see: - -```text -docker: 'model' is not a docker command -``` - -It means Docker can't find the plugin because it's not in the expected CLI plugins directory. - -To fix this, create a symlink so Docker can detect it: - -```console -$ ln -s /Applications/Docker.app/Contents/Resources/cli-plugins/docker-model ~/.docker/cli-plugins/docker-model -``` - -Once linked, re-run the command. - -### No safeguard for running oversized models - -Currently, Docker Model Runner doesn't include safeguards to prevent you from launching models that exceed their system's available resources. Attempting to run a model that is too large for the host machine may result in severe slowdowns or render the system temporarily unusable. This issue is particularly common when running LLMs models without sufficient GPU memory or system RAM. - -### No consistent digest support in Model CLI - -The Docker Model CLI currently lacks consistent support for specifying models by image digest. As a temporary workaround, you should refer to models by name instead of digest. - -## Share feedback - -Thanks for trying out Docker Model Runner. Give feedback or report any bugs you may find through the **Give feedback** link next to the **Enable Docker Model Runner** setting. - -## Disable the feature - -To disable Docker Model Runner: - -1. Open the **Settings** view in Docker Desktop. -2. Navigate to the **Beta** tab in **Features in development**. -3. Clear the **Enable Docker Model Runner** checkbox. -4. Select **Apply & restart**. diff --git a/content/manuals/ai/model-runner/_index.md b/content/manuals/ai/model-runner/_index.md new file mode 100644 index 00000000000..8ca4c4bb233 --- /dev/null +++ b/content/manuals/ai/model-runner/_index.md @@ -0,0 +1,168 @@ +--- +title: Docker Model Runner +linkTitle: Model Runner +params: + sidebar: + group: AI and agents +weight: 30 +description: Learn how to use Docker Model Runner to manage and run AI models. +keywords: Docker, ai, model runner, docker desktop, docker engine, llm, openai, ollama, llama.cpp, vllm, diffusers, cpu, nvidia, cuda, amd, rocm, vulkan, cline, continue, cursor, image generation, stable diffusion +aliases: + - /desktop/features/model-runner/ + - /model-runner/ +--- + +{{< summary-bar feature_name="Docker Model Runner" >}} + +Docker Model Runner (DMR) makes it easy to manage, run, and +deploy AI models using Docker. Designed for developers, +Docker Model Runner streamlines the process of pulling, running, and serving +large language models (LLMs) and other AI models directly from Docker Hub, +any OCI-compliant registry, or [Hugging Face](https://huggingface.co/). + +With seamless integration into Docker Desktop and Docker +Engine, you can serve models via OpenAI and Ollama-compatible APIs, package GGUF files as +OCI Artifacts, and interact with models from both the command line and graphical +interface. 
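+
+For example, a first run from the command line might look like this. This is a
+minimal sketch; `ai/smollm2` is a small model published under Docker Hub's
+`ai` namespace and is used here purely as an illustration:
+
+```console
+# Download the model from Docker Hub (cached locally after the first pull)
+$ docker model pull ai/smollm2
+
+# Send a one-off prompt; the model is loaded on demand and unloaded when idle
+$ docker model run ai/smollm2 "Hi"
+```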
+ +Whether you're building generative AI applications, experimenting with machine +learning workflows, or integrating AI into your software development lifecycle, +Docker Model Runner provides a consistent, secure, and efficient way to work +with AI models locally. + +## Key features + +- [Pull and push models to and from Docker Hub or any OCI-compliant registry](https://hub.docker.com/u/ai) +- [Pull models from Hugging Face](https://huggingface.co/) +- Serve models on [OpenAI and Ollama-compatible APIs](api-reference.md) for easy integration with existing apps +- Support for [llama.cpp, vLLM, and Diffusers inference engines](inference-engines.md) (vLLM and Diffusers on Linux with NVIDIA GPUs) +- [Generate images from text prompts](inference-engines.md#diffusers) using Stable Diffusion models with the Diffusers backend +- Package GGUF and Safetensors files as OCI Artifacts and publish them to any Container Registry +- Run and interact with AI models directly from the command line or from the Docker Desktop GUI +- [Connect to AI coding tools](ide-integrations.md) like Cline, Continue, Cursor, and Aider +- [Configure context size and model parameters](configuration.md) to tune performance +- [Set up Open WebUI](openwebui-integration.md) for a ChatGPT-like web interface +- Manage local models and display logs +- Display prompt and response details +- Conversational context support for multi-turn interactions + +## Requirements + +Docker Model Runner is supported on the following platforms: + +{{< tabs >}} +{{< tab name="Windows">}} + +Windows(amd64): +- NVIDIA GPUs +- NVIDIA drivers 576.57+ + +Windows(arm64): +- OpenCL for Adreno +- Qualcomm Adreno GPU (6xx series and later) + + > [!NOTE] + > Some llama.cpp features might not be fully supported on the 6xx series. + +{{< /tab >}} +{{< tab name="MacOS">}} + +- Apple Silicon + +{{< /tab >}} +{{< tab name="Linux">}} + +Docker Engine only: + +- Supports CPU, NVIDIA (CUDA), AMD (ROCm), and Vulkan backends +- Requires NVIDIA driver 575.57.08+ when using NVIDIA GPUs + +{{< /tab >}} +{{}} + +## How Docker Model Runner works + +Models are pulled from Docker Hub, an OCI-compliant registry, or +[Hugging Face](https://huggingface.co/) the first time you use them and are +stored locally. They load into memory only at runtime when a request is made, +and unload when not in use to optimize resources. Because models can be large, +the initial pull may take some time. After that, they're cached locally for +faster access. You can interact with the model using +[OpenAI and Ollama-compatible APIs](api-reference.md). + +### Inference engines + +Docker Model Runner supports three inference engines: + +| Engine | Best for | Model format | +|--------|----------|--------------| +| [llama.cpp](inference-engines.md#llamacpp) | Local development, resource efficiency | GGUF (quantized) | +| [vLLM](inference-engines.md#vllm) | Production, high throughput | Safetensors | +| [Diffusers](inference-engines.md#diffusers) | Image generation (Stable Diffusion) | Safetensors | + +llama.cpp is the default engine and works on all platforms. vLLM requires NVIDIA GPUs and is supported on Linux x86_64 and Windows with WSL2. Diffusers enables image generation and requires NVIDIA GPUs on Linux (x86_64 or ARM64). See [Inference engines](inference-engines.md) for detailed comparison and setup. + +### Context size + +Models have a configurable context size (context length) that determines how many tokens they can process. The default varies by model but is typically 2,048-8,192 tokens. 
You can adjust this per-model: + +```console +$ docker model configure --context-size 8192 ai/qwen2.5-coder +``` + +See [Configuration options](configuration.md) for details on context size and other parameters. + +> [!TIP] +> +> Using Testcontainers or Docker Compose? +> [Testcontainers for Java](https://java.testcontainers.org/modules/docker_model_runner/) +> and [Go](https://golang.testcontainers.org/modules/dockermodelrunner/), and +> [Docker Compose](/manuals/ai/compose/models-and-compose.md) support Docker +> Model Runner. + +## Known issues + +### `docker model` is not recognised + +If you run a Docker Model Runner command and see: + +```text +docker: 'model' is not a docker command +``` + +It means Docker can't find the plugin because it's not in the expected CLI plugins directory. + +To fix this, create a symlink so Docker can detect it: + +```console +$ ln -s /Applications/Docker.app/Contents/Resources/cli-plugins/docker-model ~/.docker/cli-plugins/docker-model +``` + +Once linked, rerun the command. + +## Privacy and data collection + +Docker Model Runner respects your privacy settings in Docker Desktop. Data collection is controlled by the **Send usage statistics** setting: + +- **Disabled**: No usage data is collected +- **Enabled**: Only minimal, non-personal data is collected: + - [Model names](https://github.com/docker/model-runner/blob/eb76b5defb1a598396f99001a500a30bbbb48f01/pkg/metrics/metrics.go#L96) (via HEAD requests to Docker Hub) + - User agent information + - Whether requests originate from the host or containers + +When using Docker Model Runner with Docker Engine, HEAD requests to Docker Hub are made to track model names, regardless of any settings. + +No prompt content, responses, or personally identifiable information is ever collected. + +## Share feedback + +Thanks for trying out Docker Model Runner. To report bugs or request features, [open an issue on GitHub](https://github.com/docker/model-runner/issues). You can also give feedback through the **Give feedback** link next to the **Enable Docker Model Runner** setting. + +## Next steps + +- [Get started with DMR](get-started.md) - Enable DMR and run your first model +- [API reference](api-reference.md) - OpenAI and Ollama-compatible API documentation +- [Configuration options](configuration.md) - Context size and runtime parameters +- [Inference engines](inference-engines.md) - llama.cpp, vLLM, and Diffusers details +- [IDE integrations](ide-integrations.md) - Connect Cline, Continue, Cursor, and more +- [Open WebUI integration](openwebui-integration.md) - Set up a web chat interface diff --git a/content/manuals/ai/model-runner/api-reference.md b/content/manuals/ai/model-runner/api-reference.md new file mode 100644 index 00000000000..edc36eec9aa --- /dev/null +++ b/content/manuals/ai/model-runner/api-reference.md @@ -0,0 +1,439 @@ +--- +title: DMR REST API +description: Reference documentation for the Docker Model Runner REST API endpoints, including OpenAI, Anthropic, and Ollama compatibility. +weight: 30 +keywords: Docker, ai, model runner, rest api, openai, anthropic, ollama, endpoints, documentation, cline, continue, cursor +--- + +Once Model Runner is enabled, new API endpoints are available. You can use +these endpoints to interact with a model programmatically. Docker Model Runner +provides compatibility with OpenAI, Anthropic, and Ollama API formats. + +## Determine the base URL + +The base URL to interact with the endpoints depends on how you run Docker and +which API format you're using. 
+ +{{< tabs >}} +{{< tab name="Docker Desktop">}} + +| Access from | Base URL | +|-------------|----------| +| Containers | `http://model-runner.docker.internal` | +| Host processes (TCP) | `http://localhost:12434` | + +> [!NOTE] +> TCP host access must be enabled. See [Enable Docker Model Runner](get-started.md#enable-docker-model-runner-in-docker-desktop). + +{{< /tab >}} +{{< tab name="Docker Engine">}} + +| Access from | Base URL | +|-------------|----------| +| Containers | `http://172.17.0.1:12434` | +| Host processes | `http://localhost:12434` | + +> [!NOTE] +> The `172.17.0.1` interface may not be available by default to containers + within a Compose project. +> In this case, add an `extra_hosts` directive to your Compose service YAML: +> +> ```yaml +> extra_hosts: +> - "model-runner.docker.internal:host-gateway" +> ``` +> Then you can access the Docker Model Runner APIs at `http://model-runner.docker.internal:12434/` + +{{< /tab >}} +{{}} + +### Base URLs for third-party tools + +When configuring third-party tools that expect OpenAI-compatible APIs, use these base URLs: + +| Tool type | Base URL format | +|-----------|-----------------| +| OpenAI SDK / clients | `http://localhost:12434/engines/v1` | +| Anthropic SDK / clients | `http://localhost:12434` | +| Ollama-compatible clients | `http://localhost:12434` | + +See [IDE and tool integrations](ide-integrations.md) for specific configuration examples. + +## Supported APIs + +Docker Model Runner supports multiple API formats: + +| API | Description | Use case | +|-----|-------------|----------| +| [OpenAI API](#openai-compatible-api) | OpenAI-compatible chat completions, embeddings | Most AI frameworks and tools | +| [Anthropic API](#anthropic-compatible-api) | Anthropic-compatible messages endpoint | Tools built for Claude | +| [Ollama API](#ollama-compatible-api) | Ollama-compatible endpoints | Tools built for Ollama | +| [Image Generation API](#image-generation-api-diffusers) | Diffusers-based image generation | Generating images from text prompts | +| [DMR API](#dmr-native-endpoints) | Native Docker Model Runner endpoints | Model management | + +## OpenAI-compatible API + +DMR implements the OpenAI API specification for maximum compatibility with existing tools and frameworks. + +### Endpoints + +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/engines/v1/models` | GET | [List models](https://platform.openai.com/docs/api-reference/models/list) | +| `/engines/v1/models/{namespace}/{name}` | GET | [Retrieve model](https://platform.openai.com/docs/api-reference/models/retrieve) | +| `/engines/v1/chat/completions` | POST | [Create chat completion](https://platform.openai.com/docs/api-reference/chat/create) | +| `/engines/v1/completions` | POST | [Create completion](https://platform.openai.com/docs/api-reference/completions/create) | +| `/engines/v1/embeddings` | POST | [Create embeddings](https://platform.openai.com/docs/api-reference/embeddings/create) | + +> [!NOTE] +> You can optionally include the engine name in the path: `/engines/llama.cpp/v1/chat/completions`. +> This is useful when running multiple inference engines. + +### Model name format + +When specifying a model in API requests, use the full model identifier including the namespace: + +```json +{ + "model": "ai/smollm2", + "messages": [...] 
+} +``` + +Common model name formats: +- Docker Hub models: `ai/smollm2`, `ai/llama3.2`, `ai/qwen2.5-coder` +- Tagged versions: `ai/smollm2:360M-Q4_K_M` +- Custom models: `myorg/mymodel` + +### Supported parameters + +The following OpenAI API parameters are supported: + +| Parameter | Type | Description | +|-----------|------|-------------| +| `model` | string | Required. The model identifier. | +| `messages` | array | Required for chat completions. The conversation history. | +| `prompt` | string | Required for completions. The prompt text. | +| `max_tokens` | integer | Maximum tokens to generate. | +| `temperature` | float | Sampling temperature (0.0-2.0). | +| `top_p` | float | Nucleus sampling parameter (0.0-1.0). | +| `stream` | Boolean | Enable streaming responses. | +| `stop` | string/array | Stop sequences. | +| `presence_penalty` | float | Presence penalty (-2.0 to 2.0). | +| `frequency_penalty` | float | Frequency penalty (-2.0 to 2.0). | + +### Limitations and differences from OpenAI + +Be aware of these differences when using DMR's OpenAI-compatible API: + +| Feature | DMR behavior | +|---------|--------------| +| API key | Not required. DMR ignores the `Authorization` header. | +| Function calling | Supported with llama.cpp for compatible models. | +| Vision | Supported for multi-modal models (e.g., LLaVA). | +| JSON mode | Supported via `response_format: {"type": "json_object"}`. | +| Logprobs | Supported. | +| Token counting | Uses the model's native token encoder, which may differ from OpenAI's. | + +## Anthropic-compatible API + +DMR provides [Anthropic Messages API](https://platform.claude.com/docs/en/api/messages) compatibility for tools and frameworks built for Claude. + +### Endpoints + +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/anthropic/v1/messages` | POST | [Create a message](https://platform.claude.com/docs/en/api/messages/create) | +| `/anthropic/v1/messages/count_tokens` | POST | [Count tokens](https://docs.anthropic.com/en/api/messages-count-tokens) | + +### Supported parameters + +The following Anthropic API parameters are supported: + +| Parameter | Type | Description | +|-----------|------|-------------| +| `model` | string | Required. The model identifier. | +| `messages` | array | Required. The conversation messages. | +| `max_tokens` | integer | Maximum tokens to generate. | +| `temperature` | float | Sampling temperature (0.0-1.0). | +| `top_p` | float | Nucleus sampling parameter. | +| `top_k` | integer | Top-k sampling parameter. | +| `stream` | Boolean | Enable streaming responses. | +| `stop_sequences` | array | Custom stop sequences. | +| `system` | string | System prompt. | + +### Example: Chat with Anthropic API + +```bash +curl http://localhost:12434/v1/messages \ + -H "Content-Type: application/json" \ + -d '{ + "model": "ai/smollm2", + "max_tokens": 1024, + "messages": [ + {"role": "user", "content": "Hello!"} + ] + }' +``` + +### Example: Streaming response + +```bash +curl http://localhost:12434/v1/messages \ + -H "Content-Type: application/json" \ + -d '{ + "model": "ai/smollm2", + "max_tokens": 1024, + "stream": true, + "messages": [ + {"role": "user", "content": "Count from 1 to 10"} + ] + }' +``` + +## Ollama-compatible API + +DMR also provides Ollama-compatible endpoints for tools and frameworks built for Ollama. 
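+
+Clients that speak the Ollama protocol can point at the DMR base URL directly. The following is a minimal sketch, assuming the `ollama` Python package is installed; the exact response shape may vary by client version:
+
+```python
+from ollama import Client
+
+# Point the Ollama client at Docker Model Runner instead of a local Ollama server.
+client = Client(host="http://localhost:12434")
+
+response = client.chat(
+    model="ai/smollm2",
+    messages=[{"role": "user", "content": "Hello!"}],
+)
+print(response["message"]["content"])
+```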
+ +### Endpoints + +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/api/tags` | GET | List available models | +| `/api/show` | POST | Show model information | +| `/api/chat` | POST | Generate chat completion | +| `/api/generate` | POST | Generate completion | +| `/api/embeddings` | POST | Generate embeddings | + +### Example: Chat with Ollama API + +```bash +curl http://localhost:12434/api/chat \ + -H "Content-Type: application/json" \ + -d '{ + "model": "ai/smollm2", + "messages": [ + {"role": "user", "content": "Hello!"} + ] + }' +``` + +### Example: List models + +```bash +curl http://localhost:12434/api/tags +``` + +## Image generation API (Diffusers) + +DMR supports image generation through the Diffusers backend, enabling you to generate +images from text prompts using models like Stable Diffusion. + +> [!NOTE] +> The Diffusers backend requires an NVIDIA GPU with CUDA support and is only +> available on Linux (x86_64 and ARM64). See [Inference engines](inference-engines.md#diffusers) +> for setup instructions. + +### Endpoint + +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/engines/diffusers/v1/images/generations` | POST | Generate an image from a text prompt | + +### Supported parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `model` | string | Required. The model identifier (e.g., `stable-diffusion:Q4`). | +| `prompt` | string | Required. The text description of the image to generate. | +| `size` | string | Image dimensions in `WIDTHxHEIGHT` format (e.g., `512x512`). | + +### Response format + +The API returns a JSON response with the generated image encoded in base64: + +```json +{ + "data": [ + { + "b64_json": "" + } + ] +} +``` + +### Example: Generate an image + +```bash +curl -s -X POST http://localhost:12434/engines/diffusers/v1/images/generations \ + -H "Content-Type: application/json" \ + -d '{ + "model": "stable-diffusion:Q4", + "prompt": "A picture of a nice cat", + "size": "512x512" + }' | jq -r '.data[0].b64_json' | base64 -d > image.png +``` + +This command: +1. Sends a POST request to the Diffusers image generation endpoint +2. Specifies the model, prompt, and output image size +3. Extracts the base64-encoded image from the response using `jq` +4. Decodes the base64 data and saves it as `image.png` + + +## DMR native endpoints + +These endpoints are specific to Docker Model Runner for model management: + +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/models/create` | POST | Pull/create a model | +| `/models` | GET | List local models | +| `/models/{namespace}/{name}` | GET | Get model details | +| `/models/{namespace}/{name}` | DELETE | Delete a local model | + +## REST API examples + +### Request from within a container + +To call the `chat/completions` OpenAI endpoint from within another container using `curl`: + +```bash +#!/bin/sh + +curl http://model-runner.docker.internal/engines/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "ai/smollm2", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Please write 500 words about the fall of Rome." + } + ] + }' + +``` + +### Request from the host using TCP + +To call the `chat/completions` OpenAI endpoint from the host via TCP: + +1. Enable the host-side TCP support from the Docker Desktop GUI, or via the [Docker Desktop CLI](/manuals/desktop/features/desktop-cli.md). 
+ For example: `docker desktop enable model-runner --tcp `. + + If you are running on Windows, also enable GPU-backed inference. + See [Enable Docker Model Runner](get-started.md#enable-docker-model-runner-in-docker-desktop). + +1. Interact with it as documented in the previous section using `localhost` and the correct port. + +```bash +#!/bin/sh + +curl http://localhost:12434/engines/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "ai/smollm2", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Please write 500 words about the fall of Rome." + } + ] + }' +``` + +### Request from the host using a Unix socket + +To call the `chat/completions` OpenAI endpoint through the Docker socket from the host using `curl`: + +```bash +#!/bin/sh + +curl --unix-socket $HOME/.docker/run/docker.sock \ + localhost/exp/vDD4.40/engines/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "ai/smollm2", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Please write 500 words about the fall of Rome." + } + ] + }' +``` + +### Streaming responses + +To receive streaming responses, set `stream: true`: + +```bash +curl http://localhost:12434/engines/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "ai/smollm2", + "stream": true, + "messages": [ + {"role": "user", "content": "Count from 1 to 10"} + ] + }' +``` + +## Using with OpenAI SDKs + +### Python + +```python +from openai import OpenAI + +client = OpenAI( + base_url="http://localhost:12434/engines/v1", + api_key="not-needed" # DMR doesn't require an API key +) + +response = client.chat.completions.create( + model="ai/smollm2", + messages=[ + {"role": "user", "content": "Hello!"} + ] +) + +print(response.choices[0].message.content) +``` + +### Node.js + +```javascript +import OpenAI from 'openai'; + +const client = new OpenAI({ + baseURL: 'http://localhost:12434/engines/v1', + apiKey: 'not-needed', +}); + +const response = await client.chat.completions.create({ + model: 'ai/smollm2', + messages: [{ role: 'user', content: 'Hello!' }], +}); + +console.log(response.choices[0].message.content); +``` + +## What's next + +- [IDE and tool integrations](ide-integrations.md) - Configure Cline, Continue, Cursor, and other tools +- [Configuration options](configuration.md) - Adjust context size and runtime parameters +- [Inference engines](inference-engines.md) - Learn about llama.cpp, vLLM, and Diffusers options diff --git a/content/manuals/ai/model-runner/configuration.md b/content/manuals/ai/model-runner/configuration.md new file mode 100644 index 00000000000..89934e97a7c --- /dev/null +++ b/content/manuals/ai/model-runner/configuration.md @@ -0,0 +1,305 @@ +--- +title: Configuration options +description: Configure context size, runtime parameters, and model behavior in Docker Model Runner. +weight: 35 +keywords: Docker, ai, model runner, configuration, context size, context length, tokens, llama.cpp, parameters +--- + +Docker Model Runner provides several configuration options to tune model behavior, +memory usage, and inference performance. This guide covers the key settings and +how to apply them. + +## Context size (context length) + +The context size determines the maximum number of tokens a model can process in +a single request, including both the input prompt and generated output. 
This is +one of the most important settings affecting memory usage and model capabilities. + +### Default context size + +By default, Docker Model Runner uses a context size that balances capability with +resource efficiency: + +| Engine | Default behavior | +|--------|------------------| +| llama.cpp | 4096 tokens | +| vLLM | Uses the model's maximum trained context size | + +> [!NOTE] +> The actual default varies by model. Most models support between 2,048 and 8,192 +> tokens by default. Some newer models support 32K, 128K, or even larger contexts. + +### Configure context size + +You can adjust context size per model using the `docker model configure` command: + +```console +$ docker model configure --context-size 8192 ai/qwen2.5-coder +``` + +Or in a Compose file: + +```yaml +models: + llm: + model: ai/qwen2.5-coder + context_size: 8192 +``` + +### Context size guidelines + +| Context size | Typical use case | Memory impact | +|--------------|------------------|---------------| +| 2,048 | Simple queries, short code snippets | Low | +| 4,096 | Standard conversations, medium code files | Moderate | +| 8,192 | Long conversations, larger code files | Higher | +| 16,384+ | Extended documents, multi-file context | High | + +> [!IMPORTANT] +> Larger context sizes require more memory (RAM/VRAM). If you experience out-of-memory +> errors, reduce the context size. As a rough guide, each additional 1,000 tokens +> requires approximately 100-500 MB of additional memory, depending on the model size. + +### Check a model's maximum context + +To see a model's configuration including context size: + +```console +$ docker model inspect ai/qwen2.5-coder +``` + +> [!NOTE] +> The `docker model inspect` command shows the model's maximum supported context length +> (e.g., `gemma3.context_length`), not the configured context size. The configured context +> size is what you set with `docker model configure --context-size` and represents the +> actual limit used during inference, which should be less than or equal to the model's +> maximum supported context length. + +## Runtime flags + +Runtime flags let you pass parameters directly to the underlying inference engine. +This provides fine-grained control over model behavior. + +### Using runtime flags + +Runtime flags can be provided through multiple mechanisms: + +#### Using Docker Compose + +In a Compose file: + +```yaml +models: + llm: + model: ai/qwen2.5-coder + context_size: 4096 + runtime_flags: + - "--temp" + - "0.7" + - "--top-p" + - "0.9" +``` + +#### Using Command Line + +With the `docker model configure` command: + +```console +$ docker model configure ai/qwen2.5-coder -- --temp 0.7 --top-p 0.9 +``` + +### Common llama.cpp parameters + +These are the most commonly used llama.cpp parameters. You don't need to look up +the llama.cpp documentation for typical use cases. + +#### Sampling parameters + +| Flag | Description | Default | Range | +|------|-------------|---------|-------| +| `--temp` | Temperature for sampling. Lower = more deterministic, higher = more creative | 0.8 | 0.0-2.0 | +| `--top-k` | Limit sampling to top K tokens. Lower = more focused | 40 | 1-100 | +| `--top-p` | Nucleus sampling threshold. 
Lower = more focused | 0.9 | 0.0-1.0 | +| `--min-p` | Minimum probability threshold | 0.05 | 0.0-1.0 | +| `--repeat-penalty` | Penalty for repeating tokens | 1.1 | 1.0-2.0 | + +**Example: Deterministic output (for code generation)** + +```yaml +runtime_flags: + - "--temp" + - "0" + - "--top-k" + - "1" +``` + +**Example: Creative output (for storytelling)** + +```yaml +runtime_flags: + - "--temp" + - "1.2" + - "--top-p" + - "0.95" +``` + +#### Performance parameters + +| Flag | Description | Default | Notes | +|------|-------------|---------|-------| +| `--threads` | CPU threads for generation | Auto | Set to number of performance cores | +| `--threads-batch` | CPU threads for batch processing | Auto | Usually same as `--threads` | +| `--batch-size` | Batch size for prompt processing | 512 | Higher = faster prompt processing | +| `--mlock` | Lock model in memory | Off | Prevents swapping, requires sufficient RAM | +| `--no-mmap` | Disable memory mapping | Off | May improve performance on some systems | + +**Example: Optimized for multi-core CPU** + +```yaml +runtime_flags: + - "--threads" + - "8" + - "--batch-size" + - "1024" +``` + +#### GPU parameters + +| Flag | Description | Default | Notes | +|------|-------------|---------|-------| +| `--n-gpu-layers` | Layers to offload to GPU | All (if GPU available) | Reduce if running out of VRAM | +| `--main-gpu` | GPU to use for computation | 0 | For multi-GPU systems | +| `--split-mode` | How to split across GPUs | layer | Options: `none`, `layer`, `row` | + +**Example: Partial GPU offload (limited VRAM)** + +```yaml +runtime_flags: + - "--n-gpu-layers" + - "20" +``` + +#### Advanced parameters + +| Flag | Description | Default | +|------|-------------|---------| +| `--rope-scaling` | RoPE scaling method | Auto | +| `--rope-freq-base` | RoPE base frequency | Model default | +| `--rope-freq-scale` | RoPE frequency scale | Model default | +| `--no-prefill-assistant` | Disable assistant pre-fill | Off | +| `--reasoning-budget` | Token budget for reasoning models | 0 (disabled) | + +### vLLM parameters + +When using the vLLM backend, different parameters are available. + +Use `--hf_overrides` to pass HuggingFace model config overrides as JSON: + +```console +$ docker model configure --hf_overrides '{"rope_scaling": {"type": "dynamic", "factor": 2.0}}' ai/model-vllm +``` + +## Configuration presets + +Here are complete configuration examples for common use cases. 
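+
+Each preset defines the top-level `models` block of a Compose file. To use one, add it alongside your services and reference the model by its key. The following is a minimal sketch assuming an application service named `app`; the image name is a placeholder:
+
+```yaml
+services:
+  app:
+    image: my-app:latest        # placeholder application image
+    models:
+      - coder                   # reference to the model defined below
+
+models:
+  coder:
+    model: ai/qwen2.5-coder
+    context_size: 4096
+```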
+ +### Code completion (fast, deterministic) + +```yaml +models: + coder: + model: ai/qwen2.5-coder + context_size: 4096 + runtime_flags: + - "--temp" + - "0.1" + - "--top-k" + - "1" + - "--batch-size" + - "1024" +``` + +### Chat assistant (balanced) + +```yaml +models: + assistant: + model: ai/llama3.2 + context_size: 8192 + runtime_flags: + - "--temp" + - "0.7" + - "--top-p" + - "0.9" + - "--repeat-penalty" + - "1.1" +``` + +### Creative writing (high temperature) + +```yaml +models: + writer: + model: ai/llama3.2 + context_size: 8192 + runtime_flags: + - "--temp" + - "1.2" + - "--top-p" + - "0.95" + - "--repeat-penalty" + - "1.0" +``` + +### Long document analysis (large context) + +```yaml +models: + analyzer: + model: ai/qwen2.5-coder:14B + context_size: 32768 + runtime_flags: + - "--mlock" + - "--batch-size" + - "2048" +``` + +### Low memory system + +```yaml +models: + efficient: + model: ai/smollm2:360M-Q4_K_M + context_size: 2048 + runtime_flags: + - "--threads" + - "4" +``` + +## Environment-based configuration + +You can also configure models via environment variables in containers: + +| Variable | Description | +|----------|-------------| +| `LLM_URL` | Auto-injected URL of the model endpoint | +| `LLM_MODEL` | Auto-injected model identifier | + +See [Models and Compose](/manuals/ai/compose/models-and-compose.md) for details on how these are populated. + +## Reset configuration + +Configuration set via `docker model configure` persists until the model is removed. +To reset configuration: + +```console +$ docker model configure --context-size -1 ai/qwen2.5-coder +``` + +Using `-1` resets to the default value. + +## What's next + +- [Inference engines](inference-engines.md) - Learn about llama.cpp and vLLM +- [API reference](api-reference.md) - API parameters for per-request configuration +- [Models and Compose](/manuals/ai/compose/models-and-compose.md) - Configure models in Compose applications diff --git a/content/manuals/ai/model-runner/examples.md b/content/manuals/ai/model-runner/examples.md new file mode 100644 index 00000000000..b29adaf3bf7 --- /dev/null +++ b/content/manuals/ai/model-runner/examples.md @@ -0,0 +1,219 @@ +--- +title: DMR examples +description: Example projects and CI/CD workflows for Docker Model Runner. +weight: 40 +keywords: Docker, ai, model runner, examples, github actions, genai, sample project +--- + +See some examples of complete workflows using Docker Model Runner. + +## Sample project + +You can now start building your generative AI application powered by Docker +Model Runner. + +If you want to try an existing GenAI application, follow these steps: + +1. Set up the sample app. Clone and run the following repository: + + ```console + $ git clone https://github.com/docker/hello-genai.git + ``` + +1. In your terminal, go to the `hello-genai` directory. + +1. Run `run.sh` to pull the chosen model and run the app. + +1. Open your app in the browser at the addresses specified in the repository + [README](https://github.com/docker/hello-genai). + +You see the GenAI app's interface where you can start typing your prompts. + +You can now interact with your own GenAI app, powered by a local model. Try a +few prompts and notice how fast the responses are — all running on your machine +with Docker. + +## Use Model Runner in GitHub Actions + +Here is an example of how to use Model Runner as part of a GitHub workflow. 
+The example installs Model Runner, tests the installation, pulls and runs a +model, interacts with the model via the API, and deletes the model. + +```yaml {title="dmr-run.yml", collapse=true} +name: Docker Model Runner Example Workflow + +permissions: + contents: read + +on: + workflow_dispatch: + inputs: + test_model: + description: 'Model to test with (default: ai/smollm2:360M-Q4_K_M)' + required: false + type: string + default: 'ai/smollm2:360M-Q4_K_M' + +jobs: + dmr-test: + runs-on: ubuntu-latest + timeout-minutes: 30 + + steps: + - name: Set up Docker + uses: docker/setup-docker-action@{{% param "setup_docker_action_version" %}} + + - name: Install docker-model-plugin + run: | + echo "Installing docker-model-plugin..." + # Add Docker's official GPG key: + sudo apt-get update + sudo apt-get install ca-certificates curl + sudo install -m 0755 -d /etc/apt/keyrings + sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc + sudo chmod a+r /etc/apt/keyrings/docker.asc + + # Add the repository to Apt sources: + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt-get update + sudo apt-get install -y docker-model-plugin + + echo "Installation completed successfully" + + - name: Test docker model version + run: | + echo "Testing docker model version command..." + sudo docker model version + + # Verify the command returns successfully + if [ $? -eq 0 ]; then + echo "✅ docker model version command works correctly" + else + echo "❌ docker model version command failed" + exit 1 + fi + + - name: Pull the provided model and run it + run: | + MODEL="${{ github.event.inputs.test_model || 'ai/smollm2:360M-Q4_K_M' }}" + echo "Testing with model: $MODEL" + + # Test model pull + echo "Pulling model..." + sudo docker model pull "$MODEL" + + if [ $? -eq 0 ]; then + echo "✅ Model pull successful" + else + echo "❌ Model pull failed" + exit 1 + fi + + # Test basic model run (with timeout to avoid hanging) + echo "Testing docker model run..." + timeout 60s sudo docker model run "$MODEL" "Give me a fact about whales." || { + exit_code=$? + if [ $exit_code -eq 124 ]; then + echo "✅ Model run test completed (timed out as expected for non-interactive test)" + else + echo "❌ Model run failed with exit code: $exit_code" + exit 1 + fi + } + - name: Test model pull and run + run: | + MODEL="${{ github.event.inputs.test_model || 'ai/smollm2:360M-Q4_K_M' }}" + echo "Testing with model: $MODEL" + + # Test model pull + echo "Pulling model..." + sudo docker model pull "$MODEL" + + if [ $? -eq 0 ]; then + echo "✅ Model pull successful" + else + echo "❌ Model pull failed" + exit 1 + fi + + # Test basic model run (with timeout to avoid hanging) + echo "Testing docker model run..." + timeout 60s sudo docker model run "$MODEL" "Give me a fact about whales." || { + exit_code=$? + if [ $exit_code -eq 124 ]; then + echo "✅ Model run test completed (timed out as expected for non-interactive test)" + else + echo "❌ Model run failed with exit code: $exit_code" + exit 1 + fi + } + + - name: Test API endpoint + run: | + MODEL="${{ github.event.inputs.test_model || 'ai/smollm2:360M-Q4_K_M' }}" + echo "Testing API endpoint with model: $MODEL" + + # Test API call with curl + echo "Testing API call..." 
+ RESPONSE=$(curl -s http://localhost:12434/engines/llama.cpp/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d "{ + \"model\": \"$MODEL\", + \"messages\": [ + { + \"role\": \"user\", + \"content\": \"Say hello\" + } + ], + \"top_k\": 1, + \"temperature\": 0 + }") + + if [ $? -eq 0 ]; then + echo "✅ API call successful" + echo "Response received: $RESPONSE" + + # Check if response contains "hello" (case-insensitive) + if echo "$RESPONSE" | grep -qi "hello"; then + echo "✅ Response contains 'hello' (case-insensitive)" + else + echo "❌ Response does not contain 'hello'" + echo "Full response: $RESPONSE" + exit 1 + fi + else + echo "❌ API call failed" + exit 1 + fi + + - name: Test model cleanup + run: | + MODEL="${{ github.event.inputs.test_model || 'ai/smollm2:360M-Q4_K_M' }}" + + echo "Cleaning up test model..." + sudo docker model rm "$MODEL" || echo "Model removal failed or model not found" + + # Verify model was removed + echo "Verifying model cleanup..." + sudo docker model ls + + echo "✅ Model cleanup completed" + + - name: Report success + if: success() + run: | + echo "🎉 Docker Model Runner daily health check completed successfully!" + echo "All tests passed:" + echo " ✅ docker-model-plugin installation successful" + echo " ✅ docker model version command working" + echo " ✅ Model pull and run operations successful" + echo " ✅ API endpoint operations successful" + echo " ✅ Cleanup operations successful" +``` + +## Related pages + +- [Models and Compose](../compose/models-and-compose.md) diff --git a/content/manuals/ai/model-runner/get-started.md b/content/manuals/ai/model-runner/get-started.md new file mode 100644 index 00000000000..aece1e2761d --- /dev/null +++ b/content/manuals/ai/model-runner/get-started.md @@ -0,0 +1,225 @@ +--- +title: Get started with DMR +description: How to install, enable, and use Docker Model Runner to manage and run AI models. +weight: 10 +keywords: Docker, ai, model runner, setup, installation, getting started +--- + +Docker Model Runner (DMR) lets you run and manage AI models locally using Docker. This page shows you how to enable DMR, pull and run a model, configure model settings, and publish custom models. + +## Enable Docker Model Runner + +You can enable DMR using Docker Desktop or Docker Engine. Follow the instructions below based on your setup. + +### Docker Desktop + +1. In the settings view, go to the **AI** tab. +1. Select the **Enable Docker Model Runner** setting. +1. If you use Windows with a supported NVIDIA GPU, you also see and can select + **Enable GPU-backed inference**. +1. Optional: To enable TCP support, select **Enable host-side TCP support**. + 1. In the **Port** field, type the port you want to use. + 1. If you interact with Model Runner from a local frontend web app, in + **CORS Allows Origins**, select the origins that Model Runner should + accept requests from. An origin is the URL where your web app runs, for + example `http://localhost:3131`. + +You can now use the `docker model` command in the CLI and view and interact +with your local models in the **Models** tab in the Docker Desktop Dashboard. + +### Docker Engine + +1. Ensure you have installed [Docker Engine](/engine/install/). +1. Docker Model Runner is available as a package. 
To install it, run: + + {{< tabs >}} + {{< tab name="Ubuntu/Debian">}} + + ```bash + $ sudo apt-get update + $ sudo apt-get install docker-model-plugin + ``` + + {{< /tab >}} + {{< tab name="RPM-base distributions">}} + + ```bash + $ sudo dnf update + $ sudo dnf install docker-model-plugin + ``` + + {{< /tab >}} + {{< /tabs >}} + +1. Test the installation: + + ```bash + $ docker model version + $ docker model run ai/smollm2 + ``` + +> [!NOTE] +> TCP support is enabled by default for Docker Engine on port `12434`. + +### Update DMR in Docker Engine + +To update Docker Model Runner in Docker Engine, uninstall it with +[`docker model uninstall-runner`](/reference/cli/docker/model/uninstall-runner/) +then reinstall it: + +```bash +docker model uninstall-runner --images && docker model install-runner +``` + +> [!NOTE] +> With the above command, local models are preserved. +> To delete the models during the upgrade, add the `--models` option to the +> `uninstall-runner` command. + +## Pull a model + +Models are cached locally. + +> [!NOTE] +> +> When you use the Docker CLI, you can also pull models directly from +> [HuggingFace](https://huggingface.co/). + +{{< tabs group="release" >}} +{{< tab name="From Docker Desktop">}} + +1. Select **Models** and select the **Docker Hub** tab. +1. Find the model you want and select **Pull**. + +![Screenshot showing the Docker Hub view.](./images/dmr-catalog.png) + +{{< /tab >}} +{{< tab name="From the Docker CLI">}} + +Use the [`docker model pull` command](/reference/cli/docker/model/pull/). +For example: + +```bash {title="Pulling from Docker Hub"} +docker model pull ai/smollm2:360M-Q4_K_M +``` + +```bash {title="Pulling from HuggingFace"} +docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF +``` + +{{< /tab >}} +{{< /tabs >}} + +## Run a model + +{{< tabs group="release" >}} +{{< tab name="From Docker Desktop">}} + +1. Select **Models** and select the **Local** tab. +1. Select the play button. The interactive chat screen opens. + +![Screenshot showing the Local view.](./images/dmr-run.png) + +{{< /tab >}} +{{< tab name="From the Docker CLI" >}} + +Use the [`docker model run` command](/reference/cli/docker/model/run/). + +{{< /tab >}} +{{< /tabs >}} + +## Configure a model + +You can configure a model, such as its maximum token limit and more, +use Docker Compose. +See [Models and Compose - Model configuration options](../compose/models-and-compose.md#model-configuration-options). + +## Publish a model + +> [!NOTE] +> +> This works for any Container Registry supporting OCI Artifacts, not only +> Docker Hub. + +You can tag existing models with a new name and publish them under a different +namespace and repository: + +```bash +# Tag a pulled model under a new name +$ docker model tag ai/smollm2 myorg/smollm2 + +# Push it to Docker Hub +$ docker model push myorg/smollm2 +``` + +For more details, see the [`docker model tag`](/reference/cli/docker/model/tag) +and [`docker model push`](/reference/cli/docker/model/push) command +documentation. + +You can also package a model file in GGUF format as an OCI Artifact and publish +it to Docker Hub. 
+ +```bash +# Download a model file in GGUF format, for example from HuggingFace +$ curl -L -o model.gguf https://huggingface.co/TheBloke/Mistral-7B-v0.1-GGUF/resolve/main/mistral-7b-v0.1.Q4_K_M.gguf + +# Package it as OCI Artifact and push it to Docker Hub +$ docker model package --gguf "$(pwd)/model.gguf" --push myorg/mistral-7b-v0.1:Q4_K_M +``` + +For more details, see the +[`docker model package`](/reference/cli/docker/model/package/) command +documentation. + +## Troubleshooting + +### Display the logs + +To troubleshoot issues, display the logs: + +{{< tabs group="release" >}} +{{< tab name="From Docker Desktop">}} + +Select **Models** and select the **Logs** tab. + +![Screenshot showing the Models view.](./images/dmr-logs.png) + +{{< /tab >}} +{{< tab name="From the Docker CLI">}} + +Use the [`docker model logs` command](/reference/cli/docker/model/logs/). + +{{< /tab >}} +{{< /tabs >}} + +### Inspect requests and responses + +Inspecting requests and responses helps you diagnose model-related issues. +For example, you can evaluate context usage to verify you stay within the model's context +window or display the full body of a request to control the parameters you are passing to your models +when developing with a framework. + +In Docker Desktop, to inspect the requests and responses for each model: + +1. Select **Models** and select the **Requests** tab. This view displays all the requests to all models: + - The time the request was sent. + - The model name and version + - The prompt/request + - The context usage + - The time it took for the response to be generated. +1. Select one of the requests to display further details: + - In the **Overview** tab, view the token usage, response metadata and generation speed, and the actual prompt and response. + - In the **Request** and **Response** tabs, view the full JSON payload of the request and the response. + +> [!NOTE] +> You can also display the requests for a specific model when you select a model and then select the **Requests** tab. + +## Related pages + +- [API reference](./api-reference.md) - OpenAI and Ollama-compatible API documentation +- [Configuration options](./configuration.md) - Context size and runtime parameters +- [Inference engines](./inference-engines.md) - llama.cpp and vLLM details +- [IDE integrations](./ide-integrations.md) - Connect Cline, Continue, Cursor, and more +- [Open WebUI integration](./openwebui-integration.md) - Set up a web chat interface +- [Models and Compose](../compose/models-and-compose.md) - Use models in Compose applications +- [Docker Model Runner CLI reference](/reference/cli/docker/model) - Complete CLI documentation \ No newline at end of file diff --git a/content/manuals/ai/model-runner/ide-integrations.md b/content/manuals/ai/model-runner/ide-integrations.md new file mode 100644 index 00000000000..2d6e477c59b --- /dev/null +++ b/content/manuals/ai/model-runner/ide-integrations.md @@ -0,0 +1,373 @@ +--- +title: IDE and tool integrations +description: Configure popular AI coding assistants and tools to use Docker Model Runner as their backend. +weight: 40 +keywords: Docker, ai, model runner, cline, continue, cursor, vscode, ide, integration, openai, ollama, claude, anthropic, claude-code +--- + +Docker Model Runner can serve as a local backend for popular AI coding assistants +and development tools. This guide shows how to configure common tools to use +models running in DMR. + +## Prerequisites + +Before configuring any tool: + +1. 
[Enable Docker Model Runner](get-started.md#enable-docker-model-runner) in Docker Desktop or Docker Engine. +2. Enable TCP host access: + - Docker Desktop: Enable **host-side TCP support** in Settings > AI, or run: + ```console + $ docker desktop enable model-runner --tcp 12434 + ``` + - Docker Engine: TCP is enabled by default on port 12434. +3. Pull a model: + ```console + $ docker model pull ai/qwen2.5-coder + ``` + +> [!TIP] +> +> The default context size for many models (such as `gpt-oss`) is 4,096 tokens, which is limiting for coding tasks. +> You can repackage it with a larger context window: +> +> ```console +> $ docker model pull gpt-oss +> $ docker model package --from ai/gpt-oss --context-size 32000 gpt-oss:32k +> ``` +> Alternatively, models like ai/glm-4.7-flash, ai/qwen2.5-coder, and ai/devstral-small-2 +> come with 128K context by default and work without repackaging. + +## Cline (VS Code) + +[Cline](https://github.com/cline/cline) is an AI coding assistant for VS Code. + +### Configuration + +1. Open VS Code and go to the Cline extension settings. +2. Select **OpenAI Compatible** as the API provider. +3. Configure the following settings: + +| Setting | Value | +|---------|-------| +| Base URL | `http://localhost:12434/engines/v1` | +| API Key | `not-needed` (or any placeholder value) | +| Model ID | `ai/qwen2.5-coder` (or your preferred model) | + +> [!IMPORTANT] +> The base URL must include `/engines/v1` at the end. Do not include a trailing slash. + +### Troubleshooting Cline + +If Cline fails to connect: + +1. Verify DMR is running: + ```console + $ docker model status + ``` + +2. Test the endpoint directly: + ```console + $ curl http://localhost:12434/engines/v1/models + ``` + +3. Check that CORS is configured if running a web-based version: + - In Docker Desktop Settings > AI, add your origin to **CORS Allowed Origins** + +## Continue (VS Code / JetBrains) + +[Continue](https://continue.dev) is an open-source AI code assistant that works with VS Code and JetBrains IDEs. + +### Configuration + +Edit your Continue configuration file (`~/.continue/config.json`): + +```json +{ + "models": [ + { + "title": "Docker Model Runner", + "provider": "openai", + "model": "ai/qwen2.5-coder", + "apiBase": "http://localhost:12434/engines/v1", + "apiKey": "not-needed" + } + ] +} +``` + +### Using Ollama provider + +Continue also supports the Ollama provider, which works with DMR: + +```json +{ + "models": [ + { + "title": "Docker Model Runner (Ollama)", + "provider": "ollama", + "model": "ai/qwen2.5-coder", + "apiBase": "http://localhost:12434" + } + ] +} +``` + +## Cursor + +[Cursor](https://cursor.sh) is an AI-powered code editor. + +### Configuration + +1. Open Cursor Settings (Cmd/Ctrl + ,). +2. Navigate to **Models** > **OpenAI API Key**. +3. Configure: + + | Setting | Value | + |---------|-------| + | OpenAI API Key | `not-needed` | + | Override OpenAI Base URL | `http://localhost:12434/engines/v1` | + +4. In the model drop-down, enter your model name: `ai/qwen2.5-coder` + +> [!NOTE] +> Some Cursor features may require models with specific capabilities (e.g., function calling). +> Use capable models like `ai/qwen2.5-coder` or `ai/llama3.2` for best results. + +## Zed + +[Zed](https://zed.dev) is a high-performance code editor with AI features. 
+ +### Configuration + +Edit your Zed settings (`~/.config/zed/settings.json`): + +```json +{ + "language_models": { + "openai": { + "api_url": "http://localhost:12434/engines/v1", + "available_models": [ + { + "name": "ai/qwen2.5-coder", + "display_name": "Qwen 2.5 Coder (DMR)", + "max_tokens": 8192 + } + ] + } + } +} +``` + +## Open WebUI + +[Open WebUI](https://github.com/open-webui/open-webui) provides a ChatGPT-like interface for local models. + +See [Open WebUI integration](openwebui-integration.md) for detailed setup instructions. + +## Aider + +[Aider](https://aider.chat) is an AI pair programming tool for the terminal. + +### Configuration + +Set environment variables or use command-line flags: + +```bash +export OPENAI_API_BASE=http://localhost:12434/engines/v1 +export OPENAI_API_KEY=not-needed + +aider --model openai/ai/qwen2.5-coder +``` + +Or in a single command: + +```console +$ aider --openai-api-base http://localhost:12434/engines/v1 \ + --openai-api-key not-needed \ + --model openai/ai/qwen2.5-coder +``` + +## LangChain + +### Python + +```python +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI( + base_url="http://localhost:12434/engines/v1", + api_key="not-needed", + model="ai/qwen2.5-coder" +) + +response = llm.invoke("Write a hello world function in Python") +print(response.content) +``` + +### JavaScript/TypeScript + +```typescript +import { ChatOpenAI } from "@langchain/openai"; + +const model = new ChatOpenAI({ + configuration: { + baseURL: "http://localhost:12434/engines/v1", + }, + apiKey: "not-needed", + modelName: "ai/qwen2.5-coder", +}); + +const response = await model.invoke("Write a hello world function"); +console.log(response.content); +``` + +## LlamaIndex + +```python +from llama_index.llms.openai_like import OpenAILike + +llm = OpenAILike( + api_base="http://localhost:12434/engines/v1", + api_key="not-needed", + model="ai/qwen2.5-coder" +) + +response = llm.complete("Write a hello world function") +print(response.text) +``` + +## OpenCode + +[OpenCode](https://opencode.ai/) is an open-source coding assistant designed to integrate directly into developer workflows. It supports multiple model providers and exposes a flexible configuration system that makes it easy to switch between them. + +See [Use OpenCode with Docker Model Runner](../../../guides/opencode-model-runner.md) +for a task-focused guide that walks through model setup, configuration, and +troubleshooting. + +### Configuration + +1. Install OpenCode (see [docs](https://opencode.ai/docs/#install)) +2. Reference DMR in your OpenCode configuration, either globally at `~/.config/opencode/opencode.json` or project specific with a `opencode.json` file in the root of your project + ```json + { + "$schema": "https://opencode.ai/config.json", + "provider": { + "dmr": { + "npm": "@ai-sdk/openai-compatible", + "name": "Docker Model Runner", + "options": { + "baseURL": "http://localhost:12434/v1" + }, + "models": { + "ai/qwen2.5-coder": { + "name": "ai/qwen2.5-coder" + }, + "ai/llama3.2": { + "name": "ai/llama3.2" + } + } + } + } + } + ``` +3. Select the model you want in OpenCode + +You can find more details in [this Docker Blog post](https://www.docker.com/blog/opencode-docker-model-runner-private-ai-coding/) + +## Claude Code + +[Claude Code](https://claude.com/product/claude-code) is [Anthropic's](https://www.anthropic.com/) command-line tool for agentic coding. 
It lives in your terminal, understands your codebase, and executes routine tasks, explains complex code, and handles Git workflows through natural language commands. + +See [Use Claude Code with Docker Model Runner](../../../guides/claude-code-model-runner.md) +for a task-focused guide that walks through model setup, configuration, and +inspecting requests. To run Claude Code in an isolated Docker Sandbox against +a local model, see +[Run Claude Code in a Docker Sandbox with Docker Model Runner](../../../guides/claude-code-sandbox-model-runner.md). + +### Configuration + +1. Install Claude Code (see [docs](https://code.claude.com/docs/en/quickstart#step-1-install-claude-code)) +2. Use the `ANTHROPIC_BASE_URL` environment variable to point Claude Code at DMR. On Mac or Linux, you can do this, for example if you want to use the `gpt-oss:32k` model: + ```bash + ANTHROPIC_BASE_URL=http://localhost:12434 claude --model qwen2.5-coder + ``` + On Windows (PowerShell) you can do it like this: + ```powershell + $env:ANTHROPIC_BASE_URL="http://localhost:12434" + claude --model gpt-oss:32k + ``` + +> [!TIP] +> +> To avoid setting the variable each time, add it to your shell profile (`~/.bashrc`, `~/.zshrc`, or equivalent): +> +> ```shell +> export ANTHROPIC_BASE_URL=http://localhost:12434 +> ``` + +You can find more details in [this Docker Blog post](https://www.docker.com/blog/run-claude-code-locally-docker-model-runner/) + +> [!NOTE] +> +> While the other integrations on this page use the [OpenAI-compatible API](/ai/model-runner/api-reference/#openai-compatible-api), DMR also exposes a [Anthropic-compatible API](/ai/model-runner/api-reference/#anthropic-compatible-api) used here. + +## Common issues + +### "Connection refused" errors + +1. Ensure Docker Model Runner is enabled and running: + ```console + $ docker model status + ``` + +2. Verify TCP access is enabled: + ```console + $ curl http://localhost:12434/engines/v1/models + ``` + +3. Check if another service is using port 12434. + +4. If you run your tool in WSL and want to connect to DMR on the host via `localhost`, this might not directly work. Configuring WSL to use [mirrored networking](https://learn.microsoft.com/en-us/windows/wsl/networking#mirrored-mode-networking) can solve this. + +### "Model not found" errors + +1. Verify the model is pulled: + ```console + $ docker model list + ``` + +2. Use the full model name including namespace (e.g., `ai/qwen2.5-coder`, not just `qwen2.5-coder`). + +### Slow responses or timeouts + +1. For first requests, models need to load into memory. Subsequent requests are faster. + +2. Consider using a smaller model or adjusting the context size: + ```console + $ docker model configure --context-size 4096 ai/qwen2.5-coder + ``` + +3. Check available system resources (RAM, GPU memory). + +### CORS errors (web-based tools) + +If using browser-based tools, add the origin to CORS allowed origins: + +1. Docker Desktop: Settings > AI > CORS Allowed Origins +2. 
Add your tool's URL (e.g., `http://localhost:3000`) + +## Recommended models by use case + +| Use case | Recommended model | Notes | +|----------|-------------------|-------| +| Code completion | `ai/qwen3-coder` | Optimized for coding tasks with a large context window | +| Agentic coding | `ai/devstral-small-2` | Good fit for tools such as Claude Code and OpenCode | +| General assistant | `ai/llama3.2` | Good balance of capabilities | +| Small/fast | `ai/smollm2` | Low resource usage | +| Embeddings | `ai/all-minilm` | For RAG and semantic search | + +## What's next + +- [API reference](api-reference.md) - Full API documentation +- [Configuration options](configuration.md) - Tune model behavior +- [Open WebUI integration](openwebui-integration.md) - Set up a web interface diff --git a/content/manuals/ai/model-runner/images/dmr-catalog.png b/content/manuals/ai/model-runner/images/dmr-catalog.png new file mode 100644 index 00000000000..15d8bd04df1 Binary files /dev/null and b/content/manuals/ai/model-runner/images/dmr-catalog.png differ diff --git a/content/manuals/ai/model-runner/images/dmr-logs.png b/content/manuals/ai/model-runner/images/dmr-logs.png new file mode 100644 index 00000000000..e2b2289e988 Binary files /dev/null and b/content/manuals/ai/model-runner/images/dmr-logs.png differ diff --git a/content/manuals/ai/model-runner/images/dmr-run.png b/content/manuals/ai/model-runner/images/dmr-run.png new file mode 100644 index 00000000000..c12b3bd5fdd Binary files /dev/null and b/content/manuals/ai/model-runner/images/dmr-run.png differ diff --git a/content/manuals/ai/model-runner/inference-engines.md b/content/manuals/ai/model-runner/inference-engines.md new file mode 100644 index 00000000000..952e2948933 --- /dev/null +++ b/content/manuals/ai/model-runner/inference-engines.md @@ -0,0 +1,410 @@ +--- +title: Inference engines +description: Learn about the llama.cpp, vLLM, and Diffusers inference engines in Docker Model Runner. +weight: 50 +keywords: Docker, ai, model runner, llama.cpp, vllm, diffusers, inference, gguf, safetensors, cuda, gpu, image generation, stable diffusion +--- + +Docker Model Runner supports three inference engines: **llama.cpp**, **vLLM**, and **Diffusers**. +Each engine has different strengths, supported platforms, and model format +requirements. This guide helps you choose the right engine and configure it for +your use case. + +## Engine comparison + +| Feature | llama.cpp | vLLM | Diffusers | +|---------|-----------|------|-------------------------------------| +| **Model formats** | GGUF | Safetensors, HuggingFace | DDUF | +| **Platforms** | All (macOS, Windows, Linux) | Linux x86_64 only | Linux (x86_64, ARM64) | +| **GPU support** | NVIDIA, AMD, Apple Silicon, Vulkan | NVIDIA CUDA only | NVIDIA CUDA only | +| **CPU inference** | Yes | No | No | +| **Quantization** | Built-in (Q4, Q5, Q8, etc.) | Limited | Limited | +| **Memory efficiency** | High (with quantization) | Moderate | Moderate | +| **Throughput** | Good | High (with batching) | Good | +| **Best for** | Local development, resource-constrained environments | Production, high throughput | Image generation | +| **Use case** | Text generation (LLMs) | Text generation (LLMs) | Image generation (Stable Diffusion) | + +## llama.cpp + +[llama.cpp](https://github.com/ggerganov/llama.cpp) is the default inference +engine in Docker Model Runner. It's designed for efficient local inference and +supports a wide range of hardware configurations. 
+ +### Platform support + +| Platform | GPU support | Notes | +|----------|-------------|-------| +| macOS (Apple Silicon) | Metal | Automatic GPU acceleration | +| Windows (x64) | NVIDIA CUDA | Requires NVIDIA drivers 576.57+ | +| Windows (ARM64) | Adreno OpenCL | Qualcomm 6xx series and later | +| Linux (x64) | NVIDIA, AMD, Vulkan | Multiple backend options | +| Linux | CPU only | Works on any x64/ARM64 system | + +### Model format: GGUF + +llama.cpp uses the GGUF format, which supports efficient quantization for reduced +memory usage without significant quality loss. + +#### Quantization levels + +| Quantization | Bits per weight | Memory usage | Quality | +|--------------|-----------------|--------------|---------| +| Q2_K | ~2.5 | Lowest | Reduced | +| Q3_K_M | ~3.5 | Minimal | Acceptable | +| Q4_K_M | ~4.5 | Low | Good | +| Q5_K_M | ~5.5 | Moderate | Excellent | +| Q6_K | ~6.5 | Higher | Excellent | +| Q8_0 | 8 | High | Near-original | +| F16 | 16 | Highest | Original | + +**Recommended**: Q4_K_M offers the best balance of quality and memory usage for +most use cases. + +#### Pulling quantized models + +Models on Docker Hub often include quantization in the tag: + +```console +$ docker model pull ai/llama3.2:3B-Q4_K_M +``` + +### Using llama.cpp + +llama.cpp is the default engine. No special configuration is required: + +```console +$ docker model run ai/smollm2 +``` + +To explicitly specify llama.cpp when running models: + +```console +$ docker model run ai/smollm2 --backend llama.cpp +``` + +### llama.cpp API endpoints + +When using llama.cpp, API calls use the llama.cpp engine path: + +```text +POST /engines/llama.cpp/v1/chat/completions +``` + +Or without the engine prefix: + +```text +POST /engines/v1/chat/completions +``` + +## vLLM + +[vLLM](https://github.com/vllm-project/vllm) is a high-performance inference +engine optimized for production workloads with high throughput requirements. + +### Platform support + +| Platform | GPU | Support status | +|----------|-----|----------------| +| Linux x86_64 | NVIDIA CUDA | Supported | +| Windows with WSL2 | NVIDIA CUDA | Supported (Docker Desktop 4.54+) | +| macOS | - | Not supported | +| Linux ARM64 | - | Not supported | +| AMD GPUs | - | Not supported | + +> [!IMPORTANT] +> vLLM requires an NVIDIA GPU with CUDA support. It does not support CPU-only +> inference. + +### Model format: Safetensors + +vLLM works with models in Safetensors format, which is the standard format for +HuggingFace models. These models typically use more memory than quantized GGUF +models but may offer better quality and faster inference on powerful hardware. + +### Setting up vLLM + +#### Docker Engine (Linux) + +Install the Model Runner with vLLM backend: + +```console +$ docker model install-runner --backend vllm --gpu cuda +``` + +Verify the installation: + +```console +$ docker model status +Docker Model Runner is running + +Status: +llama.cpp: running llama.cpp version: c22473b +vllm: running vllm version: 0.11.0 +``` + +#### Docker Desktop (Windows with WSL2) + +1. Ensure you have: + - Docker Desktop 4.54 or later (minimum version for vLLM support) + - NVIDIA GPU with updated drivers + - WSL2 enabled + +2. 
Install vLLM backend: + ```console + $ docker model install-runner --backend vllm --gpu cuda + ``` + +### Running models with vLLM + +vLLM models are typically tagged with `-vllm` suffix: + +```console +$ docker model run ai/smollm2-vllm +``` + +To specify the vLLM backend explicitly: + +```console +$ docker model run ai/model --backend vllm +``` + +### vLLM API endpoints + +When using vLLM, specify the engine in the API path: + +```text +POST /engines/vllm/v1/chat/completions +``` + +### vLLM configuration + +#### HuggingFace overrides + +Use `--hf_overrides` to pass model configuration overrides: + +```console +$ docker model configure --hf_overrides '{"max_model_len": 8192}' ai/model-vllm +``` + +#### Common vLLM settings + +| Setting | Description | Example | +|---------|-------------|---------| +| `max_model_len` | Maximum context length | 8192 | +| `gpu_memory_utilization` | Fraction of GPU memory to use | 0.9 | +| `tensor_parallel_size` | GPUs for tensor parallelism | 2 | + +### vLLM and llama.cpp performance comparison + +| Scenario | Recommended engine | +|----------|-------------------| +| Single user, local development | llama.cpp | +| Multiple concurrent requests | vLLM | +| Limited GPU memory | llama.cpp (with quantization) | +| Maximum throughput | vLLM | +| CPU-only system | llama.cpp | +| Apple Silicon Mac | llama.cpp | +| Production deployment | vLLM (if hardware supports it) | + +## Diffusers + +[Diffusers](https://github.com/huggingface/diffusers) is an inference engine +for image generation models, including Stable Diffusion. Unlike llama.cpp and +vLLM which focus on text generation with LLMs, Diffusers enables you to generate +images from text prompts. + +### Platform support + +| Platform | GPU | Support status | +|----------|-----|----------------| +| Linux x86_64 | NVIDIA CUDA | Supported | +| Linux ARM64 | NVIDIA CUDA | Supported | +| Windows | - | Not supported | +| macOS | - | Not supported | + +> [!IMPORTANT] +> Diffusers requires an NVIDIA GPU with CUDA support. It does not support +> CPU-only inference. + +### Setting up Diffusers + +Install the Model Runner with Diffusers backend: + +```console +$ docker model reinstall-runner --backend diffusers --gpu cuda +``` + +Verify the installation: + +```console +$ docker model status +Docker Model Runner is running + +Status: +llama.cpp: running llama.cpp version: 34ce48d +mlx: not installed +sglang: sglang package not installed +vllm: vLLM binary not found +diffusers: running diffusers version: 0.36.0 +``` + +### Pulling Diffusers models + +Pull a Stable Diffusion model: + +```console +$ docker model pull stable-diffusion:Q4 +``` + +### Generating images with Diffusers + +Diffusers uses an image generation API endpoint. To generate an image: + +```console +$ curl -s -X POST http://localhost:12434/engines/diffusers/v1/images/generations \ + -H "Content-Type: application/json" \ + -d '{ + "model": "stable-diffusion:Q4", + "prompt": "A picture of a nice cat", + "size": "512x512" + }' | jq -r '.data[0].b64_json' | base64 -d > image.png +``` + +This command: +1. Sends a POST request to the Diffusers image generation endpoint +2. Specifies the model, prompt, and output image size +3. Extracts the base64-encoded image from the response +4. 
Decodes it and saves it as `image.png` + +### Diffusers API endpoint + +When using Diffusers, specify the engine in the API path: + +```text +POST /engines/diffusers/v1/images/generations +``` + +### Supported parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `model` | string | Required. The model identifier (e.g., `stable-diffusion:Q4`). | +| `prompt` | string | Required. The text description of the image to generate. | +| `size` | string | Image dimensions in `WIDTHxHEIGHT` format (e.g., `512x512`). | + +## Running multiple engines + +You can run llama.cpp, vLLM, and Diffusers simultaneously. Docker Model Runner routes +requests to the appropriate engine based on the model or explicit engine selection. + +Check which engines are running: + +```console +$ docker model status +Docker Model Runner is running + +Status: +llama.cpp: running llama.cpp version: 34ce48d +mlx: not installed +sglang: sglang package not installed +vllm: running vllm version: 0.11.0 +diffusers: running diffusers version: 0.36.0 +``` + +### Engine-specific API paths + +| Engine | API path | Use case | +|--------|----------|----------| +| llama.cpp | `/engines/llama.cpp/v1/chat/completions` | Text generation | +| vLLM | `/engines/vllm/v1/chat/completions` | Text generation | +| Diffusers | `/engines/diffusers/v1/images/generations` | Image generation | +| Auto-select | `/engines/v1/chat/completions` | Text generation (auto-selects engine) | + +## Managing inference engines + +### Install an engine + +```console +$ docker model install-runner --backend [--gpu ] +``` + +Options: +- `--backend`: `llama.cpp`, `vllm`, or `diffusers` +- `--gpu`: `cuda`, `rocm`, `vulkan`, or `metal` (depends on platform) + +### Reinstall an engine + +```console +$ docker model reinstall-runner --backend +``` + +### Check engine status + +```console +$ docker model status +``` + +### View engine logs + +```console +$ docker model logs +``` + +## Packaging models for each engine + +### Package a GGUF model (llama.cpp) + +```console +$ docker model package --gguf ./model.gguf --push myorg/mymodel:Q4_K_M +``` + +### Package a Safetensors model (vLLM) + +```console +$ docker model package --safetensors ./model/ --push myorg/mymodel-vllm +``` + +## Troubleshooting + +### vLLM won't start + +1. Verify NVIDIA GPU is available: + ```console + $ nvidia-smi + ``` + +2. Check Docker has GPU access: + ```console + $ docker run --rm --gpus all nvidia/cuda:12.0-base nvidia-smi + ``` + +3. Verify you're on a supported platform (Linux x86_64 or Windows WSL2). + +### llama.cpp is slow + +1. Ensure GPU acceleration is working (check logs for Metal/CUDA messages). + +2. Try a more aggressive quantization: + ```console + $ docker model pull ai/model:Q4_K_M + ``` + +3. Reduce context size: + ```console + $ docker model configure --context-size 2048 ai/model + ``` + +### Out of memory errors + +1. Use a smaller quantization (Q4 instead of Q8). +2. Reduce context size. +3. 
For vLLM, adjust `gpu_memory_utilization`: + ```console + $ docker model configure --hf_overrides '{"gpu_memory_utilization": 0.8}' ai/model + ``` + +## What's next + +- [Configuration options](configuration.md) - Detailed parameter reference +- [API reference](api-reference.md) - API documentation +- [GPU support](/manuals/desktop/features/gpu.md) - GPU configuration for Docker Desktop diff --git a/content/manuals/ai/model-runner/openwebui-integration.md b/content/manuals/ai/model-runner/openwebui-integration.md new file mode 100644 index 00000000000..1e8cdd5805a --- /dev/null +++ b/content/manuals/ai/model-runner/openwebui-integration.md @@ -0,0 +1,293 @@ +--- +title: Open WebUI integration +description: Set up Open WebUI as a ChatGPT-like interface for Docker Model Runner. +weight: 45 +keywords: Docker, ai, model runner, open webui, openwebui, chat interface, ollama, ui +--- + +[Open WebUI](https://github.com/open-webui/open-webui) is an open-source, +self-hosted web interface that provides a ChatGPT-like experience for local +AI models. You can connect it to Docker Model Runner to get a polished chat +interface for your models. + +## Prerequisites + +- Docker Model Runner enabled with TCP access +- A model pulled (e.g., `docker model pull ai/llama3.2`) + +## Quick start with Docker Compose + +The easiest way to run Open WebUI with Docker Model Runner is using Docker Compose. + +Create a `compose.yaml` file: + +```yaml +services: + open-webui: + image: ghcr.io/open-webui/open-webui:main + ports: + - "3000:8080" + environment: + - OLLAMA_BASE_URL=http://host.docker.internal:12434 + - WEBUI_AUTH=false + extra_hosts: + - "host.docker.internal:host-gateway" + volumes: + - open-webui:/app/backend/data + +volumes: + open-webui: +``` + +Start the services: + +```console +$ docker compose up -d +``` + +Open your browser to [http://localhost:3000](http://localhost:3000). + +## Configuration options + +### Environment variables + +| Variable | Description | Default | +|----------|-------------|---------| +| `OLLAMA_BASE_URL` | URL of Docker Model Runner | Required | +| `WEBUI_AUTH` | Enable authentication | `true` | +| `OPENAI_API_BASE_URL` | Use OpenAI-compatible API instead | - | +| `OPENAI_API_KEY` | API key (use any value for DMR) | - | + +### Using OpenAI-compatible API + +If you prefer to use the OpenAI-compatible API instead of the Ollama API: + +```yaml +services: + open-webui: + image: ghcr.io/open-webui/open-webui:main + ports: + - "3000:8080" + environment: + - OPENAI_API_BASE_URL=http://host.docker.internal:12434/engines/v1 + - OPENAI_API_KEY=not-needed + - WEBUI_AUTH=false + extra_hosts: + - "host.docker.internal:host-gateway" + volumes: + - open-webui:/app/backend/data + +volumes: + open-webui: +``` + +## Network configuration + +### Docker Desktop + +On Docker Desktop, `host.docker.internal` automatically resolves to the host machine. +The previous example works without modification. 
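+
+To confirm the Model Runner endpoint is reachable from inside a container before starting Open WebUI, you can run a one-off check. This is a sketch that assumes the `curlimages/curl` image and the default TCP port:
+
+```console
+$ docker run --rm curlimages/curl -s http://host.docker.internal:12434/engines/v1/models
+```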
+ +### Docker Engine (Linux) + +On Docker Engine, you may need to configure the network differently: + +```yaml +services: + open-webui: + image: ghcr.io/open-webui/open-webui:main + network_mode: host + environment: + - OLLAMA_BASE_URL=http://localhost:12434 + - WEBUI_AUTH=false + volumes: + - open-webui:/app/backend/data + +volumes: + open-webui: +``` + +Or use the host gateway: + +```yaml +services: + open-webui: + image: ghcr.io/open-webui/open-webui:main + ports: + - "3000:8080" + environment: + - OLLAMA_BASE_URL=http://172.17.0.1:12434 + - WEBUI_AUTH=false + volumes: + - open-webui:/app/backend/data + +volumes: + open-webui: +``` + +## Using Open WebUI + +### Select a model + +1. Open [http://localhost:3000](http://localhost:3000) +2. Select the model drop-down in the top-left +3. Select from your pulled models (they appear with `ai/` prefix) + +### Pull models through the UI + +Open WebUI can pull models directly: + +1. Select the model drop-down +2. Enter a model name: `ai/llama3.2` +3. Select the download icon + +### Chat features + +Open WebUI provides: + +- Multi-turn conversations with context +- Message editing and regeneration +- Code syntax highlighting +- Markdown rendering +- Conversation history and search +- Export conversations + +## Complete example with multiple models + +This example sets up Open WebUI with Docker Model Runner and pre-pulls several models: + +```yaml +services: + open-webui: + image: ghcr.io/open-webui/open-webui:main + ports: + - "3000:8080" + environment: + - OLLAMA_BASE_URL=http://host.docker.internal:12434 + - WEBUI_AUTH=false + - DEFAULT_MODELS=ai/llama3.2 + extra_hosts: + - "host.docker.internal:host-gateway" + volumes: + - open-webui:/app/backend/data + depends_on: + model-setup: + condition: service_completed_successfully + + model-setup: + image: docker:cli + volumes: + - /var/run/docker.sock:/var/run/docker.sock + command: > + sh -c " + docker model pull ai/llama3.2 && + docker model pull ai/qwen2.5-coder && + docker model pull ai/smollm2 + " + +volumes: + open-webui: +``` + +## Enabling authentication + +For multi-user setups or security, enable authentication: + +```yaml +services: + open-webui: + image: ghcr.io/open-webui/open-webui:main + ports: + - "3000:8080" + environment: + - OLLAMA_BASE_URL=http://host.docker.internal:12434 + - WEBUI_AUTH=true + extra_hosts: + - "host.docker.internal:host-gateway" + volumes: + - open-webui:/app/backend/data + +volumes: + open-webui: +``` + +On first visit, you'll create an admin account. + +## Troubleshooting + +### Models don't appear in the drop-down + +1. Verify Docker Model Runner is accessible: + ```console + $ curl http://localhost:12434/api/tags + ``` + +2. Check that models are pulled: + ```console + $ docker model list + ``` + +3. Verify the `OLLAMA_BASE_URL` is correct and accessible from the container. + +### "Connection refused" errors + +1. Ensure TCP access is enabled for Docker Model Runner. + +2. On Docker Desktop, verify `host.docker.internal` resolves: + ```console + $ docker run --rm alpine ping -c 1 host.docker.internal + ``` + +3. On Docker Engine, try using `network_mode: host` or the explicit host IP. + +### Slow response times + +1. First requests load the model into memory, which takes time. + +2. Subsequent requests are much faster. + +3. If consistently slow, consider: + - Using a smaller model + - Reducing context size + - Checking GPU acceleration is working + +### CORS errors + +If running Open WebUI on a different host: + +1. In Docker Desktop, go to Settings > AI +2. 
Add the Open WebUI URL to **CORS Allowed Origins** + +## Customization + +### Custom system prompts + +Open WebUI supports setting system prompts per model. Configure these in the UI under Settings > Models. + +### Model parameters + +Adjust model parameters in the chat interface: + +1. Select the settings icon next to the model name +2. Adjust temperature, top-p, max tokens, etc. + +These settings are passed through to Docker Model Runner. + +## Running on a different port + +To run Open WebUI on a different port: + +```yaml +services: + open-webui: + image: ghcr.io/open-webui/open-webui:main + ports: + - "8080:8080" # Change first port number + # ... rest of config +``` + +## What's next + +- [API reference](api-reference.md) - Learn about the APIs Open WebUI uses +- [Configuration options](configuration.md) - Tune model behavior +- [IDE integrations](ide-integrations.md) - Connect other tools to DMR diff --git a/content/manuals/ai/sandboxes/_index.md b/content/manuals/ai/sandboxes/_index.md new file mode 100644 index 00000000000..56e0930493e --- /dev/null +++ b/content/manuals/ai/sandboxes/_index.md @@ -0,0 +1,87 @@ +--- +title: Docker Sandboxes +description: Run AI coding agents in isolated environments +keywords: docker sandboxes, sbx, ai agents, sandboxed agents, microVM +weight: 10 +params: + sidebar: + group: AI and agents + badge: + color: blue + text: Early Access +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +Docker Sandboxes run AI coding agents in isolated microVM sandboxes. Each +sandbox gets its own Docker daemon, filesystem, and network — the agent can +build containers, install packages, and modify files without touching your host +system. + +Organization admins can +[centrally manage sandbox network and filesystem policies](security/governance.md) +from the Docker Admin Console, so the same rules apply uniformly across every +developer's machine. Available on a separate paid subscription. + +## Get started + +Install the `sbx` CLI and sign in: + +{{< tabs >}} +{{< tab name="macOS" >}} + +```console +$ brew install docker/tap/sbx +$ sbx login +``` + +{{< /tab >}} +{{< tab name="Windows" >}} + +```powershell +> winget install -h Docker.sbx +> sbx login +``` + +{{< /tab >}} +{{< tab name="Linux (Ubuntu)" >}} + +```console +$ curl -fsSL https://get.docker.com | sudo REPO_ONLY=1 sh +$ sudo apt-get install docker-sbx +$ sudo usermod -aG kvm $USER +$ newgrp kvm +$ sbx login +``` + +{{< /tab >}} +{{< /tabs >}} + +Then launch an agent in a sandbox: + +```console +$ cd ~/my-project +$ sbx run claude +``` + +See the [get started guide](get-started.md) for a full walkthrough, or jump to +the [usage guide](usage.md) for common patterns. + +## Learn more + +- [Agents](agents/) — supported agents and per-agent configuration +- [Customize](customize/) — reusable templates and declarative kits for + extending or tailoring sandboxes +- [Architecture](architecture.md) — microVM isolation, workspace mounting, + networking +- [Security](security/) — isolation model, credential handling, network + policies, workspace trust +- [CLI reference](/reference/cli/sbx/) — full list of `sbx` commands and options +- [Troubleshooting](troubleshooting.md) — common issues and fixes +- [FAQ](faq.md) — login requirements, telemetry, etc + +## Feedback + +Your feedback shapes what gets built next. If you run into a bug, hit a +missing feature, or have a suggestion, open an issue at +[github.com/docker/sbx-releases/issues](https://github.com/docker/sbx-releases/issues). 
diff --git a/content/manuals/ai/sandboxes/agents/_index.md b/content/manuals/ai/sandboxes/agents/_index.md new file mode 100644 index 00000000000..b80fb413af4 --- /dev/null +++ b/content/manuals/ai/sandboxes/agents/_index.md @@ -0,0 +1,25 @@ +--- +title: Supported agents +linkTitle: Agents +weight: 30 +description: AI coding agents supported by Docker Sandboxes. +keywords: docker sandboxes, ai agents, claude code, codex, cursor, gemini +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +Docker Sandboxes runs the following agents out of the box: + +- [Claude Code](claude-code/) +- [Codex](codex/) +- [Copilot](copilot/) +- [Cursor](cursor/) +- [Droid](droid/) +- [Gemini](gemini/) +- [Kiro](kiro/) +- [OpenCode](opencode/) +- [Docker Agent](docker-agent/) +- [Shell](shell/) — agent-less sandbox for manual setup or testing + +Want to pre-install tools or customize an agent's environment? +See [Customize](../customize/). diff --git a/content/manuals/ai/sandboxes/agents/claude-code.md b/content/manuals/ai/sandboxes/agents/claude-code.md new file mode 100644 index 00000000000..2179d657edb --- /dev/null +++ b/content/manuals/ai/sandboxes/agents/claude-code.md @@ -0,0 +1,81 @@ +--- +title: Claude Code +weight: 10 +description: | + Use Claude Code in Docker Sandboxes with authentication, configuration, and + YOLO mode for AI-assisted development. +keywords: docker sandboxes, claude code, anthropic, ai agent, sbx +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +Official documentation: [Claude Code](https://code.claude.com/docs) + +## Quick start + +Launch Claude Code in a sandbox by pointing it at a project directory: + +```console +$ sbx run claude ~/my-project +``` + +The workspace parameter defaults to the current directory, so `sbx run claude` +from inside your project works too. To start Claude with a specific prompt: + +```console +$ sbx run claude --name my-sandbox -- "Add error handling to the login function" +``` + +Everything after `--` is passed directly to Claude Code. You can also pipe in a +prompt from a file with `-- "$(cat prompt.txt)"`. + +## Authentication + +Claude Code requires either an Anthropic API key or a Claude subscription. + +**API key**: Store your key using +[stored secrets](../security/credentials.md#stored-secrets): + +```console +$ sbx secret set -g anthropic +``` + +Alternatively, export the `ANTHROPIC_API_KEY` environment variable in your +shell before running the sandbox. See +[Credentials](../security/credentials.md) for details on both methods. + +**Claude subscription**: If no API key is set, Claude Code prompts you to +authenticate interactively using OAuth. The proxy handles the OAuth flow, so +credentials aren't stored inside the sandbox. + +## Configuration + +Sandboxes don't pick up user-level configuration from your host, such as +`~/.claude`. Only project-level configuration in the working directory is +available inside the sandbox. See +[Why doesn't the sandbox use my user-level agent configuration?](../faq.md#why-doesnt-the-sandbox-use-my-user-level-agent-configuration) +for workarounds. + +Any Claude Code CLI options can be passed after the `--` separator: + +```console +$ sbx run claude --name my-sandbox -- --continue +``` + +See the [Claude Code CLI reference](https://code.claude.com/docs/en/cli-reference) +for available options. + +## Base image + +The sandbox uses `docker/sandbox-templates:claude-code` and launches Claude Code +with `--dangerously-skip-permissions` by default. 
See +[Templates](../customize/templates.md) to build your own image on top of +this base. + +## Use a local model + +To run Claude Code in a sandbox against a local model on your host through +Docker Model Runner, see +[Run Claude Code in a Docker Sandbox with Docker Model Runner](/guides/claude-code-sandbox-model-runner/). +For the host-only version without a sandbox, see +[Use Claude Code with Docker Model Runner](/guides/claude-code-model-runner/). diff --git a/content/manuals/ai/sandboxes/agents/codex.md b/content/manuals/ai/sandboxes/agents/codex.md new file mode 100644 index 00000000000..ddf526f9097 --- /dev/null +++ b/content/manuals/ai/sandboxes/agents/codex.md @@ -0,0 +1,81 @@ +--- +title: Codex +weight: 20 +description: | + Use OpenAI Codex in Docker Sandboxes with API key authentication and YOLO + mode configuration. +keywords: docker sandboxes, codex, openai, ai agent, sbx +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +This guide covers authentication, configuration, and usage of Codex in a +sandboxed environment. + +Official documentation: [Codex CLI](https://developers.openai.com/codex/cli) + +## Quick start + +Create a sandbox and run Codex for a project directory: + +```console +$ sbx run codex ~/my-project +``` + +The workspace parameter is optional and defaults to the current directory: + +```console +$ cd ~/my-project +$ sbx run codex +``` + +## Authentication + +Codex supports two authentication methods: an API key or OAuth. + +**API key**: Store your OpenAI API key using +[stored secrets](../security/credentials.md#stored-secrets): + +```console +$ sbx secret set -g openai +``` + +Alternatively, export the `OPENAI_API_KEY` environment variable in your shell +before running the sandbox. + +**OAuth**: If you prefer not to use an API key, start the OAuth flow on your +host with: + +```console +$ sbx secret set -g openai --oauth +``` + +This opens a browser window for authentication and stores the resulting tokens +in your OS keychain. The OAuth flow runs on the host, not inside the sandbox, +so browser-based authentication works without any extra setup. + +See [Credentials](../security/credentials.md) for more details. + +## Configuration + +Sandboxes don't pick up user-level configuration from your host, such as +`~/.codex`. Only project-level configuration in the working directory is +available inside the sandbox. See +[Why doesn't the sandbox use my user-level agent configuration?](../faq.md#why-doesnt-the-sandbox-use-my-user-level-agent-configuration) +for workarounds. + +The sandbox runs Codex without approval prompts by default. Pass additional +Codex CLI options after `--`: + +```console +$ sbx run codex --name -- +``` + +## Base image + +Template: `docker/sandbox-templates:codex` + +Preconfigured to run without approval prompts. + +See [Customize](../customize/) to pre-install tools or customize this +environment. diff --git a/content/manuals/ai/sandboxes/agents/copilot.md b/content/manuals/ai/sandboxes/agents/copilot.md new file mode 100644 index 00000000000..26f119703c6 --- /dev/null +++ b/content/manuals/ai/sandboxes/agents/copilot.md @@ -0,0 +1,71 @@ +--- +title: Copilot +weight: 30 +description: | + Use GitHub Copilot in Docker Sandboxes with GitHub token authentication and + trusted folder configuration. 
+keywords: docker sandboxes, github copilot, ai agent, github token, sbx +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +This guide covers authentication, configuration, and usage of GitHub Copilot +in a sandboxed environment. + +Official documentation: [GitHub Copilot CLI](https://docs.github.com/en/copilot/how-tos/copilot-cli) + +## Quick start + +Create a sandbox and run Copilot for a project directory: + +```console +$ sbx run copilot ~/my-project +``` + +The workspace parameter is optional and defaults to the current directory: + +```console +$ cd ~/my-project +$ sbx run copilot +``` + +## Authentication + +Copilot requires a GitHub token with Copilot access. Store your token using +[stored secrets](../security/credentials.md#stored-secrets): + +```console +$ echo "$(gh auth token)" | sbx secret set -g github +``` + +Alternatively, export the `GH_TOKEN` or `GITHUB_TOKEN` environment variable in +your shell before running the sandbox. See +[Credentials](../security/credentials.md) for details on both methods. + +## Configuration + +Sandboxes don't pick up user-level configuration from your host. Only +project-level configuration in the working directory is available inside the +sandbox. See +[Why doesn't the sandbox use my user-level agent configuration?](../faq.md#why-doesnt-the-sandbox-use-my-user-level-agent-configuration) +for workarounds. + +Copilot is configured to trust the workspace directory by default, so it +operates without repeated confirmations for workspace files. + +### Pass options at runtime + +Pass Copilot CLI options after `--`: + +```console +$ sbx run copilot --name -- +``` + +## Base image + +Template: `docker/sandbox-templates:copilot` + +Preconfigured to trust the workspace directory and run without approval prompts. + +See [Customize](../customize/) to pre-install tools or customize this +environment. diff --git a/content/manuals/ai/sandboxes/agents/cursor.md b/content/manuals/ai/sandboxes/agents/cursor.md new file mode 100644 index 00000000000..1753be436cd --- /dev/null +++ b/content/manuals/ai/sandboxes/agents/cursor.md @@ -0,0 +1,79 @@ +--- +title: Cursor +weight: 33 +description: | + Use Cursor in Docker Sandboxes with API key or proxy-managed OAuth + authentication. +keywords: docker sandboxes, cursor, cursor agent, ai agent, sbx +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +This guide covers authentication, configuration, and usage of Cursor in a +sandboxed environment. + +Official documentation: [Cursor CLI](https://cursor.com/cli) + +## Quick start + +Create a sandbox and run Cursor for a project directory: + +```console +$ sbx run cursor ~/my-project +``` + +The workspace parameter is optional and defaults to the current directory: + +```console +$ cd ~/my-project +$ sbx run cursor +``` + +## Authentication + +Cursor supports two authentication methods: an API key or OAuth. + +**API key**: Store your Cursor API key using +[stored secrets](../security/credentials.md#stored-secrets): + +```console +$ sbx secret set -g cursor +``` + +Alternatively, export the `CURSOR_API_KEY` environment variable in your shell +before running the sandbox. See +[Credentials](../security/credentials.md) for details on both methods. + +**OAuth**: If no API key is set, Cursor prompts you to sign in interactively +on first run. The proxy intercepts the token exchange with +`api2.cursor.sh/auth/poll`, so credentials are managed by the host and aren't +stored inside the sandbox. 
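Either way, launching the sandbox itself doesn't change. For example, the environment-variable route described above boils down to the following, where the key value is a placeholder you replace with your own:

```console
$ export CURSOR_API_KEY=<your-cursor-api-key>
$ sbx run cursor ~/my-project
```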
+ +## Configuration + +Sandboxes don't pick up user-level configuration from your host, such as +`~/.cursor`. Only project-level configuration in the working directory is +available inside the sandbox. See +[Why doesn't the sandbox use my user-level agent configuration?](../faq.md#why-doesnt-the-sandbox-use-my-user-level-agent-configuration) +for workarounds. + +Cursor reads `AGENTS.md` from the workspace for agent-specific instructions. + +The sandbox runs Cursor in YOLO mode by default, which executes commands +without approval prompts. Pass additional `cursor-agent` CLI options after +`--`: + +```console +$ sbx run cursor --name -- +``` + +## Base image + +Template: `docker/sandbox-templates:cursor-agent-docker` + +Preconfigured to run in YOLO mode with HTTP/1.1 and server-sent events for +agent traffic so requests flow through the host proxy. Authentication state +is persisted across sandbox restarts. + +See [Customize](../customize/) to pre-install tools or customize this +environment. diff --git a/content/manuals/ai/sandboxes/agents/docker-agent.md b/content/manuals/ai/sandboxes/agents/docker-agent.md new file mode 100644 index 00000000000..d575a14fef6 --- /dev/null +++ b/content/manuals/ai/sandboxes/agents/docker-agent.md @@ -0,0 +1,73 @@ +--- +title: Docker Agent +weight: 70 +description: | + Use Docker Agent in Docker Sandboxes with multi-provider authentication + supporting OpenAI, Anthropic, and more. +keywords: docker sandboxes, docker agent, openai, anthropic, sbx +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +Official documentation: [Docker Agent](https://docs.docker.com/ai/docker-agent/) + +## Quick start + +Create a sandbox and run Docker Agent for a project directory: + +```console +$ sbx run docker-agent ~/my-project +``` + +The workspace parameter defaults to the current directory, so +`sbx run docker-agent` from inside your project works too. + +## Authentication + +Docker Agent supports multiple providers. Store keys for the providers you want +to use with [stored secrets](../security/credentials.md#stored-secrets): + +```console +$ sbx secret set -g openai +$ sbx secret set -g anthropic +$ sbx secret set -g google +$ sbx secret set -g xai +$ sbx secret set -g nebius +$ sbx secret set -g mistral +``` + +You only need to configure the providers you want to use. Docker Agent detects +available credentials and routes requests to the appropriate provider. + +Alternatively, export the environment variables (`OPENAI_API_KEY`, +`ANTHROPIC_API_KEY`, `GOOGLE_API_KEY`, `XAI_API_KEY`, `NEBIUS_API_KEY`, +`MISTRAL_API_KEY`) in your shell before running the sandbox. See +[Credentials](../security/credentials.md) for details on both methods. + +## Configuration + +Sandboxes don't pick up user-level configuration from your host. Only +project-level configuration in the working directory is available inside the +sandbox. See +[Why doesn't the sandbox use my user-level agent configuration?](../faq.md#why-doesnt-the-sandbox-use-my-user-level-agent-configuration) +for workarounds. + +The sandbox runs Docker Agent without approval prompts by default. Pass +additional CLI options after `--`: + +```console +$ sbx run docker-agent --name my-sandbox -- +``` + +For example, to specify a custom `agent.yml` configuration file: + +```console +$ sbx run docker-agent -- agent.yml +``` + +## Base image + +The sandbox uses `docker/sandbox-templates:docker-agent` and launches Docker +Agent without approval prompts by default. 
See +[Templates](../customize/templates.md) to build your own image on top of +this base. diff --git a/content/manuals/ai/sandboxes/agents/droid.md b/content/manuals/ai/sandboxes/agents/droid.md new file mode 100644 index 00000000000..783afae7d86 --- /dev/null +++ b/content/manuals/ai/sandboxes/agents/droid.md @@ -0,0 +1,76 @@ +--- +title: Droid +weight: 35 +description: | + Use Droid in Docker Sandboxes with API key or OAuth authentication. +keywords: docker sandboxes, droid, factory, ai agent, sbx +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +This guide covers authentication, configuration, and usage of Droid, an AI +coding agent by Factory, in a sandboxed environment. + +Official documentation: [Droid](https://docs.factory.ai/) + +## Quick start + +Create a sandbox and run Droid for a project directory: + +```console +$ sbx run droid ~/my-project +``` + +The workspace parameter is optional and defaults to the current directory: + +```console +$ cd ~/my-project +$ sbx run droid +``` + +## Authentication + +Droid requires a [Factory account](https://factory.ai). Both authentication +methods authenticate you to Factory's service directly — unlike other agents +where you supply a model provider key, Factory manages model access through +your Factory account. + +**API key**: Store your Factory API key using +[stored secrets](../security/credentials.md#stored-secrets): + +```console +$ sbx secret set -g droid +``` + +Alternatively, export the `FACTORY_API_KEY` environment variable in your shell +before running the sandbox. See +[Credentials](../security/credentials.md) for details on both methods. + +**OAuth**: If no API key is set, Droid prompts you to authenticate +interactively on first run. The proxy handles the OAuth flow, so credentials +aren't stored inside the sandbox. + +## Configuration + +Sandboxes don't pick up user-level configuration from your host. Only +project-level configuration in the working directory is available inside the +sandbox. See +[Why doesn't the sandbox use my user-level agent configuration?](../faq.md#why-doesnt-the-sandbox-use-my-user-level-agent-configuration) +for workarounds. + +The sandbox runs Droid without approval prompts by default. Pass additional +`droid` CLI options after `--`: + +```console +$ sbx run droid --name -- +``` + +## Base image + +Template: `docker/sandbox-templates:droid-docker` + +Preconfigured to run without approval prompts. Authentication state is +persisted across sandbox restarts. + +See [Customize](../customize/) to pre-install tools or customize this +environment. diff --git a/content/manuals/ai/sandboxes/agents/gemini.md b/content/manuals/ai/sandboxes/agents/gemini.md new file mode 100644 index 00000000000..5505e5a02e3 --- /dev/null +++ b/content/manuals/ai/sandboxes/agents/gemini.md @@ -0,0 +1,76 @@ +--- +title: Gemini +weight: 40 +description: | + Use Google Gemini in Docker Sandboxes with proxy-managed authentication and + API key configuration. +keywords: docker sandboxes, gemini, google, ai agent, sbx +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +This guide covers authentication, configuration, and usage of Google Gemini in +a sandboxed environment. 
+ +Official documentation: [Gemini CLI](https://geminicli.com/docs/) + +## Quick start + +Create a sandbox and run Gemini for a project directory: + +```console +$ sbx run gemini ~/my-project +``` + +The workspace parameter is optional and defaults to the current directory: + +```console +$ cd ~/my-project +$ sbx run gemini +``` + +## Authentication + +Gemini requires either a Google API key or a Google account with Gemini access. + +**API key**: Store your key using +[stored secrets](../security/credentials.md#stored-secrets): + +```console +$ sbx secret set -g google +``` + +Alternatively, export the `GEMINI_API_KEY` or `GOOGLE_API_KEY` environment +variable in your shell before running the sandbox. See +[Credentials](../security/credentials.md) for details on both methods. + +**Google account**: If no API key is set, Gemini prompts you to sign in +interactively when it starts. Interactive authentication is scoped to the +sandbox and doesn't persist if you remove and recreate it. + +## Configuration + +Sandboxes don't pick up user-level configuration from your host, such as +`~/.gemini`. Only project-level configuration in the working directory is +available inside the sandbox. See +[Why doesn't the sandbox use my user-level agent configuration?](../faq.md#why-doesnt-the-sandbox-use-my-user-level-agent-configuration) +for workarounds. + +The sandbox runs Gemini without approval prompts by default and disables +Gemini's built-in sandbox tool (since the sandbox itself provides isolation). +Pass additional Gemini CLI options after `--`: + +```console +$ sbx run gemini --name -- +``` + +## Base image + +Template: `docker/sandbox-templates:gemini` + +Gemini is configured to disable its built-in OAuth flow. Authentication is +managed through the proxy with API keys. Preconfigured to run without +approval prompts. + +See [Customize](../customize/) to pre-install tools or customize this +environment. diff --git a/content/manuals/ai/sandboxes/agents/kiro.md b/content/manuals/ai/sandboxes/agents/kiro.md new file mode 100644 index 00000000000..cd49cabed0f --- /dev/null +++ b/content/manuals/ai/sandboxes/agents/kiro.md @@ -0,0 +1,99 @@ +--- +title: Kiro +weight: 50 +description: | + Use Kiro in Docker Sandboxes with device flow authentication for interactive + AI-assisted development. +keywords: docker sandboxes, kiro, ai agent, authentication, sbx +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +This guide covers authentication, configuration, and usage of Kiro in a +sandboxed environment. + +Official documentation: [Kiro CLI](https://kiro.dev/docs/cli/) + +## Quick start + +Create a sandbox and run Kiro for a project directory: + +```console +$ sbx run kiro ~/my-project +``` + +The workspace parameter is optional and defaults to the current directory: + +```console +$ cd ~/my-project +$ sbx run kiro +``` + +On first run, Kiro prompts you to authenticate using device flow. + +## Authentication + +Kiro uses device flow authentication, which requires interactive login through +a web browser. This method provides secure authentication without storing API +keys directly. + +### Device flow login + +When you first run Kiro, it prompts you to authenticate: + +1. Kiro displays a URL and a verification code +2. Open the URL in your web browser +3. Enter the verification code +4. Complete the authentication flow in your browser +5. 
Return to the terminal - Kiro proceeds automatically + +The authentication session is persisted in the sandbox and doesn't require +repeated login unless you destroy and recreate the sandbox. + +### Manual login + +You can trigger the login flow manually: + +```console +$ sbx run kiro --name -- login --use-device-flow +``` + +This command initiates device flow authentication without starting a coding +session. + +### Authentication persistence + +Kiro stores authentication state in `~/.local/share/kiro-cli/data.sqlite3` +inside the sandbox. This database persists as long as the sandbox exists. If +you destroy the sandbox, you'll need to authenticate again when you recreate +it. + +## Configuration + +Sandboxes don't pick up user-level configuration from your host. Only +project-level configuration in the working directory is available inside the +sandbox. See +[Why doesn't the sandbox use my user-level agent configuration?](../faq.md#why-doesnt-the-sandbox-use-my-user-level-agent-configuration) +for workarounds. + +Kiro requires minimal configuration. The agent runs with trust-all-tools mode +by default, which lets it execute commands without repeated approval +prompts. + +### Pass options at runtime + +Pass Kiro CLI options after `--`: + +```console +$ sbx run kiro --name -- +``` + +## Base image + +Template: `docker/sandbox-templates:kiro` + +Preconfigured to run without approval prompts. Authentication state is +persisted across sandbox restarts. + +See [Customize](../customize/) to pre-install tools or customize this +environment. diff --git a/content/manuals/ai/sandboxes/agents/opencode.md b/content/manuals/ai/sandboxes/agents/opencode.md new file mode 100644 index 00000000000..e327d8ffa59 --- /dev/null +++ b/content/manuals/ai/sandboxes/agents/opencode.md @@ -0,0 +1,101 @@ +--- +title: OpenCode +weight: 60 +description: | + Use OpenCode in Docker Sandboxes with multi-provider authentication and TUI + interface for AI development. +keywords: docker sandboxes, opencode, ai agent, authentication, sbx +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +This guide covers authentication, configuration, and usage of OpenCode in a +sandboxed environment. + +Official documentation: [OpenCode](https://opencode.ai/docs) + +## Quick start + +Create a sandbox and run OpenCode for a project directory: + +```console +$ sbx run opencode ~/my-project +``` + +The workspace parameter is optional and defaults to the current directory: + +```console +$ cd ~/my-project +$ sbx run opencode +``` + +OpenCode launches a TUI (text user interface) where you can select your +preferred LLM provider and interact with the agent. + +## Authentication + +OpenCode supports multiple providers. Store keys for the providers you want to +use with [stored secrets](../security/credentials.md#stored-secrets): + +```console +$ sbx secret set -g openai +$ sbx secret set -g anthropic +$ sbx secret set -g google +$ sbx secret set -g xai +$ sbx secret set -g groq +$ sbx secret set -g aws +``` + +You only need to configure the providers you want to use. OpenCode detects +available credentials and offers those providers in the TUI. + +You can also use environment variables (`OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, +`GOOGLE_API_KEY`, `XAI_API_KEY`, `GROQ_API_KEY`, `AWS_ACCESS_KEY_ID`). See +[Credentials](../security/credentials.md) for details on both methods. + +## Configuration + +Sandboxes don't pick up user-level configuration from your host. 
Only +project-level configuration in the working directory is available inside the +sandbox. See +[Why doesn't the sandbox use my user-level agent configuration?](../faq.md#why-doesnt-the-sandbox-use-my-user-level-agent-configuration) +for workarounds. + +OpenCode uses a TUI interface and doesn't require extensive configuration +files. The agent prompts you to select a provider when it starts, and you can +switch providers during a session. + +### Pass options at runtime + +Pass OpenCode CLI options after `--`: + +```console +$ sbx run opencode --name -- +``` + +For example, to resume an existing session in a named sandbox: + +```console +$ sbx run -- -s +``` + +### TUI mode + +OpenCode launches in TUI mode by default. The interface shows: + +- Available LLM providers (based on configured credentials) +- Current conversation history +- File operations and tool usage +- Real-time agent responses + +Use keyboard shortcuts to navigate the interface and interact with the agent. + +## Base image + +Template: `docker/sandbox-templates:opencode` + +OpenCode supports multiple LLM providers with automatic credential injection +through the sandbox proxy. + +See [Customize](../customize/) to pre-install tools or customize this +environment. diff --git a/content/manuals/ai/sandboxes/agents/shell.md b/content/manuals/ai/sandboxes/agents/shell.md new file mode 100644 index 00000000000..36cd0a0ec17 --- /dev/null +++ b/content/manuals/ai/sandboxes/agents/shell.md @@ -0,0 +1,43 @@ +--- +title: Shell +weight: 90 +description: Run an agent-less sandbox with a Bash login shell for manual setup, testing custom agent implementations, or inspecting a running environment. +keywords: sandboxes, sbx, shell, agent, manual setup, testing +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +`sbx run shell` drops you into a Bash login shell inside a sandbox with no +pre-installed agent binary. It's useful for installing and configuring +agents manually, testing custom implementations, or inspecting a running +environment. + +```console +$ sbx run shell ~/my-project +``` + +The workspace path defaults to the current directory. To run a one-off +command instead of an interactive shell, pass it after `--`: + +```console +$ sbx run shell -- -c "echo 'Hello from sandbox'" +``` + +Set your API keys as environment variables so the sandbox proxy can inject +them into API requests automatically. Credentials are never stored inside +the VM: + +```console +$ export ANTHROPIC_API_KEY=sk-ant-xxxxx +$ export OPENAI_API_KEY=sk-xxxxx +``` + +Once inside the shell, you can install agents using their standard methods, +for example `npm install -g @continuedev/cli`. For complex setups, build a +[custom template](../customize/templates.md) instead of installing +interactively each time. + +## Base image + +The shell sandbox uses the `shell` base image — the common base environment +without a pre-installed agent. diff --git a/content/manuals/ai/sandboxes/architecture.md b/content/manuals/ai/sandboxes/architecture.md new file mode 100644 index 00000000000..8361927496c --- /dev/null +++ b/content/manuals/ai/sandboxes/architecture.md @@ -0,0 +1,70 @@ +--- +title: Architecture +weight: 40 +description: Technical architecture of Docker Sandboxes; workspace mounting, storage, networking, and sandbox lifecycle. +keywords: docker sandboxes, architecture, microVM, workspace mounting, sandbox lifecycle +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +This page explains how Docker Sandboxes work under the hood. 
For the security +properties of the architecture, see [Sandbox isolation](security/isolation.md). + +## Workspace mounting + +Your workspace is mounted directly into the sandbox through a filesystem +passthrough. The sandbox sees your actual host files, so changes in either +direction are instant with no sync process involved. + +Your workspace is mounted at the same absolute path as on your host. Preserving +absolute paths means error messages, configuration files, and build outputs all +reference paths you can find on your host. The agent sees exactly the directory +structure you see, which reduces confusion when debugging or reviewing changes. + +## Storage and persistence + +When you create a sandbox, everything inside it persists until you remove it: +Docker images and containers built or pulled by the agent, installed packages, +agent state and history, and workspace changes. + +Sandboxes are isolated from each other. Each one maintains its own Docker +daemon state, image cache, and package installations. Multiple sandboxes don't +share images or layers. + +Each sandbox consumes disk space for its VM image, Docker images, container +layers, and volumes, and this grows as you build images and install packages. + +## Networking + +All outbound traffic from the sandbox routes through an HTTP/HTTPS proxy on +your host. Agents are configured to use the proxy automatically. The proxy +enforces [network access policies](security/policy.md) and handles +[credential injection](security/credentials.md). See +[Network isolation](security/isolation.md#network-isolation) for how this +works and [Default security posture](security/defaults.md) for what is +allowed out of the box. + +## Lifecycle + +`sbx run` initializes a VM with a workspace for a specified agent and starts +the agent. You can stop and restart without recreating the VM, preserving +installed packages and Docker images. + +Sandboxes persist until explicitly removed. Stopping an agent doesn't delete +the VM; environment setup carries over between runs. Use `sbx rm` to delete +the sandbox, its VM, and all of its contents. If the sandbox used +`--branch`, the worktree directories and their branches are also removed. + +## Comparison to alternatives + +| Approach | Isolation | Docker access | Use case | +| --------------------------------------------------- | -------------------- | ------------------ | ------------------ | +| Sandboxes (microVMs) | Full (hypervisor) | Isolated daemon | Autonomous agents | +| Container with socket mount | Partial (namespaces) | Shared host daemon | Trusted tools | +| [Docker-in-Docker](https://hub.docker.com/_/docker) | Partial (privileged) | Nested daemon | CI/CD pipelines | +| Host execution | None | Host daemon | Manual development | + +Sandboxes trade higher resource overhead (a VM plus its own daemon) for +complete isolation. Use containers when you need lightweight packaging without +Docker access. Use sandboxes when you need to give something autonomous full +Docker capabilities without trusting it with your host environment. diff --git a/content/manuals/ai/sandboxes/customize/_index.md b/content/manuals/ai/sandboxes/customize/_index.md new file mode 100644 index 00000000000..a5441fca1ee --- /dev/null +++ b/content/manuals/ai/sandboxes/customize/_index.md @@ -0,0 +1,65 @@ +--- +title: Customizing sandboxes +linkTitle: Customize +description: Build reusable sandbox images, extend agents with tools and credentials, and define custom agents using templates and kits. 
+keywords: sandboxes, sbx, customize, templates, kits, mixins, custom agents +weight: 35 +aliases: + - /ai/sandboxes/agents/custom-environments/ +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +Docker Sandboxes offers two ways to customize a sandbox beyond the built-in +defaults: + +- [Templates](templates.md) — reusable sandbox images with tools, packages, + and configuration baked in. Extend a base image with a Dockerfile, or + save a running sandbox as a template. +- [Kits](kits.md) — declarative YAML artifacts that extend an agent with + tools, credentials, network rules, and files at runtime, or define a new + agent from scratch. + +Kits are experimental. The kit file format, CLI commands, and experience for +creating, loading, and managing kits are subject to change as the feature +evolves. Share feedback and bug reports in the +[docker/sbx-releases](https://github.com/docker/sbx-releases) repository. + +## Templates and kits, side by side + +A template is a Docker image that the sandbox runs. It's built ahead +of time with a Dockerfile (or saved from a running sandbox), pushed to a +registry, and pulled when a sandbox is created. Use templates for things +that belong in an image: system packages, language toolchains, large +dependencies — anything you'd rather not reinstall on every sandbox start. + +A kit is a YAML artifact applied at sandbox creation. The kit can run +install commands, drop files into the sandbox, declare network and +credential rules, and (for agent kits) define which template image the +agent runs in. Use kits for things that vary per agent or per team: +shared linter config, project-specific install steps, credential +injection for a service the agent talks to. + +Templates and kits work together. An agent kit's `agent.image` field +points at a template: the template provides the base environment, the +kit layers config, secrets, and runtime behavior on top. A team can ship +one heavy template and several thin kits without rebuilding the image +each time something changes. + +## When to use which + +| Goal | Option | +| --------------------------------------------------------- | ------------------------------------------------------------- | +| Pre-install tools and packages into a reusable base image | [Template](templates.md) | +| Capture a configured running sandbox for reuse | [Saved template](templates.md#saving-a-sandbox-as-a-template) | +| Add a tool, credential, or config to agent runs via YAML | [Kit (mixin)](kits.md) | +| Define a new agent from scratch | [Kit (agent)](kits.md#defining-an-agent) | + +Templates and kits can be used together. A template bakes heavy tools into +the image for fast sandbox startup; a kit layered on top adds per-run +credentials, config, or extra capabilities. + +## Tutorials + +- [Build your own agent kit](build-an-agent.md) — step-by-step walkthrough + for packaging [Amp](https://ampcode.com/) as an agent kit. diff --git a/content/manuals/ai/sandboxes/customize/build-an-agent.md b/content/manuals/ai/sandboxes/customize/build-an-agent.md new file mode 100644 index 00000000000..735590297d9 --- /dev/null +++ b/content/manuals/ai/sandboxes/customize/build-an-agent.md @@ -0,0 +1,328 @@ +--- +title: Build your own agent kit +linkTitle: Build an agent +description: Walk through building an agent kit for Amp, from base image choice to invocation. 
+keywords: sandboxes, sbx, kits, agent, tutorial, amp, ampcode +weight: 30 +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +> [!NOTE] +> Kits are experimental. The kit file format, CLI commands, and experience +> for creating, loading, and managing kits are subject to change as the +> feature evolves. Share feedback and bug reports in the +> [docker/sbx-releases](https://github.com/docker/sbx-releases) repository. + +This tutorial walks through building an agent kit for the +[Amp](https://ampcode.com/) coding agent. Each step explains the decision +behind a part of the spec, so you can apply the same reasoning to other agents. + +For reference on every field, see the [Kits](kits.md) page. This tutorial +focuses on the journey. + +The finished kit is also published as a runnable sample at +[docker/sbx-kits-contrib](https://github.com/docker/sbx-kits-contrib/tree/main/amp) — +useful as a reference while you follow along. + +## Choose a base image + +An agent kit needs a container image that satisfies the +[base image requirements](kits.md#base-image-requirements): non-root +`agent` user at UID 1000, passwordless sudo, `/home/agent/` home, and HTTP +proxy environment variable forwarding. + +Rather than build an image from scratch, extend one of the published +sandbox templates. Three common starting points: + +- `docker/sandbox-templates:shell`. Generic base with no pre-installed + agent. +- `docker/sandbox-templates:shell-docker`. Same, with Docker Engine inside + the sandbox. +- Agent-specific variants (`claude-code`, `codex`, etc.). Only useful if + you're extending that specific agent. + +For Amp, pick `shell-docker`: + +- Amp isn't pre-installed in any variant, so you need a generic base + (`shell`). +- Docker support is handy since coding agents often need to run containers. +- If you don't need Docker inside the sandbox, use the `shell` tag for a + lighter, non-privileged environment. + +## Plan authentication + +Amp authenticates with an API key in `AMP_API_KEY`. To keep the real key +out of the VM, you split the work in two: + +- The kit's network section maps the API host to a service identifier + and tells the proxy which header to inject. +- You provide your key once on the host, via sbx's secret store. The + real value stays on the host; only a placeholder reaches the sandbox. + +Inside the sandbox `AMP_API_KEY` is set to that placeholder. The proxy +substitutes the real key on outbound requests to the API host, so the +secret never enters the sandbox. A later section walks through the +specific command for storing the key. + +## Write the agent block + +The `agent:` block tells the sandbox how to launch Amp when the user +attaches. + +```yaml {title="amp/spec.yaml"} +schemaVersion: "1" +kind: agent +name: amp +displayName: Amp +description: The frontier coding agent. + +agent: + image: "docker/sandbox-templates:shell-docker" + aiFilename: AGENTS.md + persistence: persistent + entrypoint: + run: [amp, --dangerously-allow-all] +``` + +- `aiFilename: AGENTS.md` tells the sandbox to create `AGENTS.md` at launch + and append the [`memory`](#prime-amp-with-memory) block to it. Amp reads + this file for instructions. +- `persistence: persistent` keeps Amp's state (auth tokens, history) in a + named volume across sandbox restarts. Without it, you re-authenticate + every time. +- `entrypoint.run` runs `amp` in "YOLO-mode" when the sandbox starts. Adjust if + you want to pass different args on startup. 
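At this stage the kit is nothing more than a directory containing the spec file. The optional `files/` tree described on the [Kits](kits.md) page only comes into play if you later add static files:

```text
amp/
└── spec.yaml
```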
+ +## Install Amp + +Amp installs via a curl-to-bash script: + +```yaml +commands: + install: + - command: "curl -fsSL https://ampcode.com/install.sh | bash" + user: "1000" + description: Install Amp +``` + +Note `user: "1000"`. That's the agent user. Install commands run as root +(UID 0) by default, and Amp's installer puts the binary in the user's home +directory. Running as root would land the binary in `/root/` where the +agent can't reach it. + +## Allow network access + +The network block does two things: it lists the hosts the sandbox is +allowed to reach (`allowedDomains`), and it wires the kit-side half of +the auth flow from [Plan authentication](#plan-authentication) +(`serviceDomains` + `serviceAuth`). + +```yaml +network: + serviceDomains: + ampcode.com: amp + serviceAuth: + amp: + headerName: Authorization + valueFormat: "Bearer %s" + allowedDomains: + - "ampcode.com:443" + - "*.ampcode.com:443" +``` + +`allowedDomains` here covers the apex (`ampcode.com`) and the +install/CDN subdomains (`*.ampcode.com`). Treat it as a starting point; +Amp may reach other domains (model providers, analytics, updates) that +you'll discover by watching `sbx policy log` while testing. + +For the auth wiring, when the agent makes an outbound request to +`ampcode.com`, the proxy looks up the host in `serviceDomains` to find +the service id `amp`, then uses `serviceAuth.amp` to inject an +`Authorization: Bearer ` header. The `` value comes from the +secret you'll register in +[Register your API key](#register-your-api-key), matched by host. The +service id (`amp`) is just a label that ties the two blocks together — +pick any name. + +> [!IMPORTANT] +> Keep `serviceDomains` narrow. Mapping `*.ampcode.com` would push the +> proxy into TLS-intercepting mode for every subdomain — including the +> binary CDN the install script downloads from — which corrupts those +> downloads. List only the host that actually needs auth. + +## Prime Amp with memory + +The `memory` field appends markdown to `AGENTS.md` at sandbox creation. +Use it to tell Amp about the sandbox environment so it knows the +conventions when it starts. + +```yaml +memory: | + ## Sandbox environment + + You are running inside a Docker sandbox. The workspace is mounted at + its absolute host path. `sudo` is passwordless; use it for package + installs. Docker is available inside the sandbox; containers you start + are isolated in the microVM. +``` + +Keep this short and sandbox-specific. For project instructions, put a +regular `AGENTS.md` in the workspace. + +## The full spec + +Putting it all together: + +```yaml {title="amp/spec.yaml"} +schemaVersion: "1" +kind: agent +name: amp +displayName: Amp +description: The frontier coding agent. + +agent: + image: "docker/sandbox-templates:shell-docker" + aiFilename: AGENTS.md + persistence: persistent + entrypoint: + run: [amp, --dangerously-allow-all] + +network: + serviceDomains: + ampcode.com: amp + serviceAuth: + amp: + headerName: Authorization + valueFormat: "Bearer %s" + allowedDomains: + - "ampcode.com:443" + - "*.ampcode.com:443" + +commands: + install: + - command: "curl -fsSL https://ampcode.com/install.sh | bash" + user: "1000" + description: Install Amp + +memory: | + ## Sandbox environment + + You are running inside a Docker sandbox. The workspace is mounted at + its absolute host path. `sudo` is passwordless; use it for package + installs. +``` + +## Register your API key + +Register your Amp API key on the host with `sbx secret set-custom`. 
The +value goes into the host secret store, and a placeholder is exposed +inside every sandbox you launch from this kit. + +Amp validates `AMP_API_KEY`'s format at startup, so the placeholder needs +to look like a real Amp key. Pick a placeholder shape that matches Amp's +expected format: + +```console +$ sbx secret set-custom -g \ + --host ampcode.com \ + --env AMP_API_KEY \ + --placeholder "sgamp-{rand}" \ + --value "$AMP_API_KEY" +``` + +`{rand}` expands to a random suffix at registration time. Inside the +sandbox `AMP_API_KEY` is set to that placeholder; Amp accepts it as a +syntactically valid key, and the proxy substitutes the real secret on +outbound requests to `ampcode.com`. + +> [!TIP] +> `sbx secret set-custom` is only required because Amp validates the +> key's format. If your agent reads the env var without a local format +> check, you can declare `environment.proxyManaged: [AMP_API_KEY]` in +> the kit instead and skip this user-side step — the proxy uses a +> default sentinel value (`proxy-managed`) that the agent never sees +> rejected. + +> [!NOTE] +> `sbx secret set-custom` is an experimental command and isn't listed +> in `sbx secret --help`. It works today but may change in future +> releases. This tutorial surfaces it because there's no other path to +> register a custom-format placeholder. + +## Run it + +Validate the spec: + +```console +$ sbx kit validate ./amp/ +``` + +Launch a sandbox with the kit, passing the kit's `name:` (`amp`) as the +agent argument: + +```console +$ sbx run --kit ./amp/ amp +``` + +The published copy of this kit also runs directly from the contrib +repository: + +```console +$ sbx run --kit "git+https://github.com/docker/sbx-kits-contrib.git#dir=amp" amp +``` + +## Iterate + +As you use the kit, you'll likely hit missing domains or install quirks. +Two loops help: + +- Watch the network policy log (`sbx policy log`) to catch blocked + requests, then add their domains to `allowedDomains`. +- Edit the spec and re-run `sbx run --kit ./amp/ amp` to pick up changes. + Remove the sandbox first (`sbx rm `) for a clean start. + +Flesh out the `memory` block as you refine how Amp should behave in the +sandbox. + +## Publish + +Once the kit works, share it by packaging as a ZIP, pushing to an OCI +registry, or committing to a Git repository. See +[Packaging and distribution](kits.md#packaging-and-distribution) for the +`sbx kit` subcommands. + +## Adapt this to another agent + +Most of the specifics here are Amp's. To port the pattern, work through +the same decisions for your agent: + +- **Base image**: `shell-docker` if you need Docker inside the sandbox, + `shell` otherwise. Or extend either with your own image if the install + is heavy. +- **Install**: a `commands.install` block at runtime, or bake the agent + into a custom image. Pick install if it's a one-line script; bake if + the install is slow or you need a pinned version. +- **Network mapping**: list only the API host in `serviceDomains`, not + a wildcard. Keep install/CDN paths out of TLS-intercepting mode. +- **Credential injection**: if the agent validates the API key's format + locally, register with `sbx secret set-custom` and pick a matching + placeholder. If it accepts the env var as-is, declare + `environment.proxyManaged` in the kit and skip the user-side step. + +The rest — memory block, network-policy iteration, packaging — is the +same regardless of agent. 
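The `environment.proxyManaged` route mentioned under **Credential injection** is just two extra blocks in the spec. A minimal sketch, using a hypothetical service id `my-agent` and variable name `MY_AGENT_API_KEY`, which works together with the same `serviceDomains`/`serviceAuth` wiring shown earlier:

```yaml
environment:
  proxyManaged:
    - MY_AGENT_API_KEY

credentials:
  sources:
    my-agent:
      env:
        - MY_AGENT_API_KEY
```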
+ +## Remove the stored secret + +To remove the entry created earlier with `sbx secret set-custom`, pass +the host to `sbx secret rm`: + +```console +$ sbx secret rm -g --host ampcode.com +``` + +The `--host` flag on `sbx secret rm` isn't listed in +`sbx secret rm --help`, but it's the only way to remove entries +created with `set-custom`. Like `set-custom` itself, it's experimental +and may change. diff --git a/content/manuals/ai/sandboxes/customize/kit-examples.md b/content/manuals/ai/sandboxes/customize/kit-examples.md new file mode 100644 index 00000000000..1027ea8f4b5 --- /dev/null +++ b/content/manuals/ai/sandboxes/customize/kit-examples.md @@ -0,0 +1,242 @@ +--- +title: Kit examples +linkTitle: Examples +description: Copy-and-adapt spec.yaml snippets for common mixin and agent kit patterns — static files, install commands, background services, initFiles, Claude Code skills, and agent forks. +keywords: sandboxes, sbx, kits, mixins, examples, patterns, skills +weight: 25 +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +> [!NOTE] +> Kits are experimental. The kit file format, CLI commands, and experience +> for creating, loading, and managing kits are subject to change as the +> feature evolves. Share feedback and bug reports in the +> [docker/sbx-releases](https://github.com/docker/sbx-releases) repository. + +Each section below shows one `spec.yaml` snippet that demonstrates a +single kit pattern. These aren't complete, distributable kits — they're +small, focused examples you can lift into your own kit. For the full +spec reference, see [Kits](kits.md). + +## Drop a shared config file + +Use static files under `files/workspace/` when the content is the same +across every sandbox and doesn't need any runtime values substituted +in. Typical use cases: linter rules, editor settings, a shared +`.editorconfig`, team dotfiles. + +```text +ruff-lint/ +├── spec.yaml +└── files/ + └── workspace/ + └── ruff.toml +``` + +```yaml {title="ruff-lint/spec.yaml"} +schemaVersion: "1" +kind: mixin +name: ruff-lint +displayName: Ruff +description: Python linting with shared team config + +commands: + install: + - command: "uv tool install ruff@latest" + user: "1000" +``` + +```toml {title="ruff-lint/files/workspace/ruff.toml"} +line-length = 80 + +[lint] +select = ["E", "F", "I"] +``` + +## Install a tool at sandbox creation + +`commands.install` runs once per sandbox, at creation time. It's where +anything that needs to land in the image goes — package managers +(`apt-get`, `pip`, `npm`), binary downloads, or vendor install scripts. + +```yaml +commands: + install: + - command: "apt-get update && apt-get install -y jq" + - command: "curl -fsSL https://example.com/install.sh | sh" +``` + +Install commands run as root by default. Set `user: "1000"` when the +step should run as the agent user — for example, `npm install -g` +against a user-scoped prefix, or anything that writes to +`/home/agent/`. + +## Run a background service + + + +`commands.startup` runs at every sandbox start. For long-running +services, background them inside a shell command and redirect output to +a log file. Relying on the `background: true` field alone can leave +the service attached to a shell that exits, which silently kills it. 
+ +```yaml +commands: + startup: + - command: + - sh + - -c + - nohup my-service --port 8080 > /tmp/my-service.log 2>&1 & + user: "1000" +``` + +The log file is worth the extra flag: if the service exits early, its +stderr goes to a parent shell that isn't attached to anything you can +read. An empty log file tells you the wrapper ran; a populated one +tells you why it failed. + +## Bake runtime values into a file with initFiles + +When a config file needs a value that isn't known until sandbox start +— most often the absolute workspace path — use `commands.initFiles`. +The `${WORKDIR}` placeholder expands to the primary workspace path +when the file is written. + +```yaml +commands: + initFiles: + - path: /home/agent/.local/bin/start-code-server.sh + content: | + exec code-server --bind-addr 0.0.0.0:8080 --auth none "${WORKDIR}" + mode: "0755" + startup: + - command: + - sh + - -c + - nohup /home/agent/.local/bin/start-code-server.sh > /tmp/code-server.log 2>&1 & + user: "1000" +``` + +`mode: "0755"` makes the generated file executable so the startup +command can invoke it directly. + +Use `initFiles` instead of a static file whenever the content depends +on a runtime value. Use a static file otherwise. + +> [!TIP] +> This snippet is lifted from the +> [code-server kit](https://github.com/docker/sbx-kits-contrib/tree/main/code-server) +> in the contrib repository, which is also a runnable sample that demonstrates +> the full pattern. + +## Ship a Claude Code skill + +Claude Code reads project-scoped skills from +`.claude/skills//SKILL.md` in the workspace. Drop one into +`files/workspace/` and it's available in the sandbox. + +```text +docker-review/ +├── spec.yaml +└── files/ + └── workspace/ + └── .claude/ + └── skills/ + └── docker-review/ + └── SKILL.md +``` + +```yaml {title="docker-review/spec.yaml"} +schemaVersion: "1" +kind: mixin +name: docker-review +displayName: Dockerfile review skill +description: Ships a Claude Code skill that reviews Dockerfiles +``` + +```markdown {title="docker-review/files/workspace/.claude/skills/docker-review/SKILL.md"} +--- +name: docker-review +description: Review a Dockerfile for best practices. Use when the user asks to review, audit, or improve a Dockerfile. +--- + +When reviewing a Dockerfile, check: + +1. Base image — pinned tag or digest, appropriate for the workload +2. Layer order — dependencies copied before application source +3. Image size — multi-stage builds, `.dockerignore`, package-manager cache flags +4. Security — non-root `USER`, no secrets in `ARG`/`ENV` +5. Reproducibility — pinned package versions, frontend directive where relevant +``` + +Kits have to target the workspace rather than `~/.claude/` because +sandboxes don't pick up user-level agent configuration from the host. +See the +[FAQ](../faq.md#why-doesnt-the-sandbox-use-my-user-level-agent-configuration) +for details. + +## Fork an existing agent + +Agent kits (`kind: agent`) define a full agent from scratch. The most +common variant is a fork of a built-in agent — same image and +credentials, but a different entrypoint. 
This example reproduces the +built-in `claude` agent but drops `--dangerously-skip-permissions` so +every tool call prompts for approval: + +```yaml {title="claude-safe/spec.yaml"} +schemaVersion: "1" +kind: agent +name: claude-safe +displayName: Claude Code (with approval prompts) +description: Claude Code without --dangerously-skip-permissions + +agent: + image: "docker/sandbox-templates:claude-code-docker" + aiFilename: CLAUDE.md + persistence: persistent + entrypoint: + run: [claude] + +network: + serviceDomains: + api.anthropic.com: anthropic + console.anthropic.com: anthropic + serviceAuth: + anthropic: + headerName: x-api-key + valueFormat: "%s" + allowedDomains: + - "claude.com:443" + +credentials: + sources: + anthropic: + env: + - ANTHROPIC_API_KEY +``` + +Launch with the kit's `name:` as the agent argument to `sbx run`: + +```console +$ sbx run claude-safe --kit ./claude-safe +``` + +For a step-by-step walkthrough of building a new agent kit from +scratch, see [Build an agent](build-an-agent.md). + +## More examples + +These patterns are all drawn from working kits in the +[sbx-kits-contrib](https://github.com/docker/sbx-kits-contrib) +repository, which contains each example as a complete, loadable kit. +Use it to study the full shape of a kit, or load one directly: + +```console +$ sbx run claude --kit "git+https://github.com/docker/sbx-kits-contrib.git#dir=" +``` diff --git a/content/manuals/ai/sandboxes/customize/kits.md b/content/manuals/ai/sandboxes/customize/kits.md new file mode 100644 index 00000000000..b37997d8ec5 --- /dev/null +++ b/content/manuals/ai/sandboxes/customize/kits.md @@ -0,0 +1,629 @@ +--- +title: Kits +description: Extend a sandbox with tools, credentials, network rules, and configuration using declarative YAML artifacts. +keywords: sandboxes, sbx, kits, mixins, customization, extensions, agents +weight: 20 +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +> [!NOTE] +> Kits are experimental. The kit file format, CLI commands, and experience +> for creating, loading, and managing kits are subject to change as the +> feature evolves. Share feedback and bug reports in the +> [docker/sbx-releases](https://github.com/docker/sbx-releases) repository. + +A kit packages a set of capabilities a sandbox can use, such as: + +- Tools to install +- Environment variables to set +- Credentials to inject +- Domains to allow +- Files to drop in +- Startup commands to run + +You declare these in a single `spec.yaml` file, point the CLI at the +directory (or a ZIP, OCI artifact, or Git URL), and the sandbox applies +and enforces them at runtime. Credentials stay on the host and go through +a proxy instead of entering the VM, and outbound traffic is restricted to +the domains the kit allows. + +A kit is either a mixin or an agent: + +- Mixin kits (`kind: mixin`) extend an existing agent with extra + capabilities. Stack several on the same sandbox. +- Agent kits (`kind: agent`) define a full agent from scratch: its image, + entrypoint, network policies, and everything else the agent needs. + +## What kits can do + +### Run commands + +A kit can run commands inside the sandbox automatically. **Install +commands** run once at creation; **startup commands** run each time +the sandbox starts. 
+ +Install commands are the place to put anything an agent needs into the +image, via `apt`, `pip`, `npm`, `curl | bash`, or whatever fits: + +```yaml +commands: + install: + - command: "apt-get update && apt-get install -y jq" +``` + +Startup commands cover things like launching background services, +warming caches, or refreshing config on each start: + +```yaml +commands: + startup: + - command: ["sh", "-c", "my-daemon &"] +``` + +### Inject files + +Kits can inject files into the sandbox in two ways: **static files** bundled +with the kit, and **`initFiles`** written at startup with runtime values +substituted in. + +Static files work well for content that doesn't vary between sandboxes, such +as tool configurations, shared linter rules, helper scripts the agent can +invoke, or reference material like a style guide or API cheatsheet. + +```text +my-kit/ +├── spec.yaml +└── files/ + ├── home/ + │ └── .config/my-tool/settings.json + └── workspace/ + └── .editorconfig +``` + +`initFiles` cover content that depends on runtime values, such as an +absolute workspace path that a tool needs to bake into its config file +at startup: + +```yaml +commands: + initFiles: + - path: /home/agent/.my-tool/config.json + content: '{"workspace": "${WORKDIR}"}' + onlyIfMissing: true +``` + +See [`initFiles`](#initfiles) in the spec reference for all fields. + +### Set environment variables + +Environment variables set by the kit are available to the agent at +runtime: + +```yaml +environment: + variables: + MY_TOOL_WORKSPACE: /home/agent/my-tool +``` + +For credentials, see +[Authenticate to external services](#authenticate-to-external-services). +Don't put secret values directly in `environment.variables` — they'd +be visible inside the sandbox VM. + +### Control network access + +Network rules define which domains the sandbox can reach: + +```yaml +network: + allowedDomains: + - api.example.com + - "*.cdn.example.com" +``` + +For authenticated services, see +[Authenticate to external services](#authenticate-to-external-services). + +### Authenticate to external services + +A kit can attach credentials to outbound requests through the +host-side proxy. The agent inside the VM works with a sentinel value; +the proxy reads the real credential on the host and overwrites the +auth header before the request leaves the sandbox. + +The standard pattern uses four blocks tied to a service identifier +you choose (here, `my-service`): + +```yaml +network: + allowedDomains: + - api.example.com + serviceDomains: + api.example.com: my-service # Tag traffic to this domain + serviceAuth: + my-service: + headerName: Authorization # Overwrite this header + valueFormat: "Bearer %s" + +credentials: + sources: + my-service: + env: + - MY_SERVICE_API_KEY # Host-side credential lookup + +environment: + proxyManaged: + - MY_SERVICE_API_KEY # Set the in-VM env var to "proxy-managed" +``` + +The agent boots with `MY_SERVICE_API_KEY=proxy-managed`, sends a +request with that value in `Authorization`, and the proxy overwrites +the header with the real credential before forwarding. The real +secret never enters the VM. + +See [Credentials](../security/credentials.md) for how to provide the +credential value on your host, other approaches for cases the example +above doesn't fit, and what the proxy does at request time. 
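To supply the credential value on the host, store a secret under the same
identifier the kit declares — for example, keeping the `my-service`
identifier from the snippet above:

```console
$ sbx secret set -g my-service
```

The command prompts for the value and stores it in your OS keychain, and the
proxy reads it from there at request time.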
+ +### Define an agent + +Agent kits declare an `agent:` block with the image the agent runs in and +the command the user attaches to when they launch the sandbox: + +```yaml +agent: + image: "my-registry/my-agent:latest" + entrypoint: + run: [my-agent, "--yolo"] +``` + +See [Agent kits](#agent-kits) for use cases and an example. + +## Mixin kits + +A mixin kit extends an existing agent with extra capabilities. Common use +cases: + +- Pre-install tools: linters, libraries, or other custom programs +- Grant the agent access to a new authenticated service (a database, a + vendor API) +- Inject shared team config (linter rules, editor settings, dotfiles) + +### Example: Python linting kit + +This kit installs [Ruff](https://docs.astral.sh/ruff/) and injects a shared +configuration file, so every sandbox starts with the same linting setup. + +```text +ruff-lint/ +├── spec.yaml +└── files/ + └── workspace/ + └── ruff.toml +``` + +```yaml {title="ruff-lint/spec.yaml"} +schemaVersion: "1" +kind: mixin +name: ruff-lint +displayName: Ruff Linter +description: Python linting with shared team config + +network: + allowedDomains: + - pypi.org + - files.pythonhosted.org + +commands: + install: + - command: "uv tool install ruff@latest" + user: "1000" + description: Install Ruff +``` + +```toml {title="ruff-lint/files/workspace/ruff.toml"} +line-length = 100 + +[lint] +select = ["E", "F", "I"] +``` + +> [!TIP] +> The templates for the built-in agents (`claude`, `codex`, etc) already +> includes `uv`, so this mixin can use it without installing it separately. + +To start a new sandbox with this mixin: + +```console +$ sbx run claude --kit /path/to/ruff-lint/ +``` + +To apply the mixin to a sandbox that's already running, use +[`sbx kit add`](#local) instead. The `--kit` flag only takes effect when a +sandbox is created. + +## Agent kits + +An agent kit defines a full agent from scratch — image, entrypoint, and +everything the agent needs. Common use cases: + +- Package a custom agent you've built so others can run it +- Ship a team-internal agent with defaults baked in +- Run a fork of an existing agent with your own config +- Prototype a new agent integration + +Agent kits declare everything a mixin kit can, plus an +[`agent:` block](#agent-block) that tells the sandbox how to launch the +agent. For a step-by-step walkthrough, see +[Build your own agent kit](build-an-agent.md). + +### Example: the built-in `claude` agent + +The `claude` agent you get from `sbx run claude` is defined as a kit. Here +is an abbreviated version of its spec, showing how the agent block combines +with network, credentials, environment, and commands: + +```yaml {title="claude/spec.yaml"} +schemaVersion: "1" +kind: agent +name: claude +agent: + image: "docker/sandbox-templates:claude-code-docker" + aiFilename: CLAUDE.md + persistence: persistent + entrypoint: + run: [claude, "--dangerously-skip-permissions"] + +network: + serviceDomains: + api.anthropic.com: anthropic + console.anthropic.com: anthropic + serviceAuth: + anthropic: + headerName: x-api-key + valueFormat: "%s" + allowedDomains: + - "claude.com:443" + +credentials: + sources: + anthropic: + env: + - ANTHROPIC_API_KEY + +environment: + variables: + IS_SANDBOX: "1" + +commands: + install: + - command: "curl -fsSL https://claude.ai/install.sh | bash" + user: "1000" + description: Install Claude Code +``` + +## Using kits + +Kits can be loaded from a local path (a directory or ZIP file), a Git +repository, or an OCI registry. 
Pass `--kit` more than once to stack several kits on the same sandbox.

> [!IMPORTANT]
> `--kit` only takes effect when a sandbox is created. Passing it
> against an existing sandbox name fails with
> `--kit can only be used when creating a new sandbox`. To extend a
> running sandbox with a kit, use [`sbx kit add`](#local) instead.

### Local

Point `--kit` at a directory or ZIP file on disk:

```console
$ sbx run claude --kit ./my-kit/
$ sbx run claude --kit ./my-kit-1.0.zip
```

While iterating on a kit, apply changes to a running sandbox with
`sbx kit add` instead of recreating it:

```console
$ sbx kit add my-sandbox ./my-kit/
```

`kit add` re-runs install commands and re-copies files. Kits can't be
removed from a running sandbox — remove and recreate it to start clean.

### Git repository

```console
$ sbx run claude --kit "git+https://github.com/docker/sbx-kits-contrib.git#ref=v0.1.0&dir=code-server"
```

- `#ref=<revision>` pins to a specific revision. Defaults to the
  repository's default branch.
- `#dir=<path>` loads a kit from a subdirectory.
- `git+ssh://` URLs also work, using your local SSH agent, Git credential
  helpers, and `.netrc`.
- Quote the URL in shells where `&` starts a background job.

### OCI registry

```console
$ sbx run claude --kit ghcr.io/myorg/my-kit:1.0
```

For Docker Hub, include the full `docker.io` prefix. See
[Packaging and distribution](#packaging-and-distribution) for publishing.

> [!IMPORTANT]
> Private kits are only supported on Docker Hub. `sbx` reuses your
> `sbx login` session to pull private artifacts from Docker Hub. Other
> registries are pulled anonymously, so private kits hosted on
> registries other than Docker Hub fail to pull.

## Packaging and distribution

The `sbx kit` subcommands validate, inspect, and publish kits:

- `sbx kit validate <path>` — check that a kit directory or ZIP is
  well-formed.
- `sbx kit inspect <path>` — display kit details. Add `--json` for
  machine-readable output.
- `sbx kit pack <path> -o <file>` — package a directory as a ZIP file
  for sharing.
- `sbx kit push <reference>` — publish to an OCI registry (for example,
  `ghcr.io/myorg/my-kit:1.0`).
- `sbx kit pull <reference>` — download a kit from a registry as a ZIP file to
  the working directory.

For Docker Hub, include the full `docker.io` prefix — `sbx` doesn't add it
automatically.

## Spec reference

A kit directory has a required `spec.yaml` and an optional `files/` tree:

```text
my-kit/
├── spec.yaml   # required
└── files/      # optional — static files to inject
    ├── home/
    └── workspace/
```

### Top-level fields

```yaml
schemaVersion: "1"
kind: <mixin|agent>
name: <name>
displayName: <display name>
description: <description>
```

| Field           | Required | Description                                                               |
| --------------- | -------- | ------------------------------------------------------------------------- |
| `schemaVersion` | Yes      | Spec schema version. Set to `"1"`.                                        |
| `kind`          | Yes      | `mixin` for kits that extend an agent; `agent` for kits that define one.  |
| `name`          | Yes      | Unique identifier. Lowercase, alphanumeric, hyphens.                      |
| `displayName`   | No       | Human-readable name.                                                      |
| `description`   | No       | Short description.                                                        |

The sections below apply to both kinds. Agent kits also declare an
[`agent:` block](#agent-block).

### Credentials

```yaml
credentials:
  sources:
    <service>:
      env: [<env-var>, ...]
      file:
        path: <path>
        parser: <parser>
      priority: <env-first|file-first>
```

| Field                           | Description                                                    |
| ------------------------------- | -------------------------------------------------------------- |
| `sources`                       | Map of service identifier to credential source.                |
| `sources.<service>.env`         | Environment variables to read on the host, in priority order.  |
| `sources.<service>.file.path`   | Path on host. `~` expands to home.                             |
| `sources.<service>.file.parser` | How to extract the value (for example, `"json:apiKey"`).       |
| `sources.<service>.priority`    | `env-first` (default) or `file-first`.                         |

Service identifiers link credentials to [network rules](#network).

### Network

```yaml
network:
  allowedDomains: [<domain>, ...]
  serviceDomains:
    <domain>: <service>
  serviceAuth:
    <service>:
      headerName: <header>
      valueFormat: <format>
```

| Field                     | Description                                                       |
| ------------------------- | ----------------------------------------------------------------- |
| `allowedDomains`          | Domains the sandbox can reach. Wildcards supported.               |
| `serviceDomains`          | Map of domain to service identifier from `credentials.sources`.   |
| `serviceAuth.headerName`  | HTTP header the proxy sets (for example, `Authorization`).        |
| `serviceAuth.valueFormat` | Format string for the header value (for example, `"Bearer %s"`).  |

### Environment

```yaml
environment:
  variables:
    <NAME>: <value>
  proxyManaged: [<NAME>, ...]
```

| Field          | Description                                                                                                           |
| -------------- | --------------------------------------------------------------------------------------------------------------------- |
| `variables`    | Key-value pairs set directly in the container.                                                                         |
| `proxyManaged` | Environment variable names populated by the proxy at request time. Pair with [`credentials.sources`](#credentials).    |

Variable names must be valid shell identifiers (`[A-Za-z_][A-Za-z0-9_]*`).

### Commands

```yaml
commands:
  install:
    - command: <shell command>
      user: <uid>
      description: <description>
  startup:
    - command: [<arg>, ...]
      user: <uid>
      background: <true|false>
      description: <description>
  initFiles:
    - path: <absolute path>
      content: <file content>
      mode: <octal mode>
      onlyIfMissing: <true|false>
      description: <description>
```

#### `install`

Runs once during sandbox creation. Shell strings passed to `sh -c`.

| Field         | Default | Description                   |
| ------------- | ------- | ----------------------------- |
| `command`     | —       | Shell command string.         |
| `user`        | `"0"`   | User to run as. `"0"` = root. |
| `description` | —       | Human-readable description.   |

#### `startup`

Runs at every sandbox start. String array, not interpreted by a shell.

| Field         | Default  | Description                         |
| ------------- | -------- | ----------------------------------- |
| `command`     | —        | Command and args as a string array. |
| `user`        | `"1000"` | User to run as. `"1000"` = agent.   |
| `background`  | `false`  | Run in background.                  |
| `description` | —        | Human-readable description.         |

Startup commands are non-interactive. They run before the agent
attaches, with no terminal connected, so they can't prompt the user
(for example, an interactive `aws login` will hang or fail). They also
don't gate the agent's entrypoint: the agent launches once startup
commands have been dispatched, regardless of `background`. Use them
for non-interactive prep — launching daemons, warming caches,
refreshing config — and use `commands.initFiles` for any value that
needs to land on disk before the agent runs.

#### `initFiles`

Files written at sandbox start, with runtime substitution.

| Field           | Default  | Description                                                |
| --------------- | -------- | ---------------------------------------------------------- |
| `path`          | —        | Absolute container path.                                   |
| `content`       | —        | File content. `${WORKDIR}` expands to the workspace path.  |
| `mode`          | `"0644"` | File permissions in octal.                                 |
| `onlyIfMissing` | `false`  | Skip if the file already exists.                           |

### Static files

```text
my-kit/files/
├── home/       → /home/agent/
└── workspace/  → primary workspace path
```

| Kit path           | Container destination                    |
| ------------------ | ---------------------------------------- |
| `files/home/`      | `/home/agent/` (config files, dotfiles)  |
| `files/workspace/` | The primary workspace path               |

Parent directories are created automatically. Existing files are
overwritten. Absolute paths and path-traversal sequences (`../../`) are
rejected.

### Agent block

Required for `kind: agent`.
```yaml
agent:
  image: <image reference>
  aiFilename: <filename>
  persistence: <persistent|ephemeral>
  entrypoint:
    run: [<command>, ...]
    args: [<arg>, ...]
```

| Field                   | Required | Description                                                                                      |
| ----------------------- | -------- | ------------------------------------------------------------------------------------------------ |
| `agent.image`           | Yes      | Docker image reference. See [Base image requirements](#base-image-requirements).                  |
| `agent.aiFilename`      | No       | Memory filename (for example, `AGENTS.md`). Appends top-level [`memory`](#memory) at creation.    |
| `agent.persistence`     | No       | `persistent` (named volume across restarts) or `ephemeral` (default).                             |
| `agent.entrypoint.run`  | No       | Command and args as a string array. Replaces the image's entrypoint.                              |
| `agent.entrypoint.args` | No       | Args appended to the image's existing entrypoint.                                                 |

#### Base image requirements

The agent's container image must provide:

- A non-root `agent` user at UID 1000 with passwordless sudo.
- A `/home/agent/` home directory owned by `agent`.
- HTTP proxy environment variables (`HTTP_PROXY`, `HTTPS_PROXY`,
  `NO_PROXY`) preserved across sudo.
- The agent binary (baked in, or installed via
  [`commands.install`](#commands)).

Build on top of `docker/sandbox-templates:shell-docker` to get these for
free.

#### Memory

```yaml
memory: |
  <markdown content>
```

Top-level field. Markdown appended to the agent's memory file at sandbox
creation. The agent reads this content at startup, so write it as
instructions or notes the agent should follow when working in the
sandbox. Applied only when `agent.aiFilename` is set.

The file is written to the parent of the workspace path inside the
sandbox, not to the workspace itself. For a workspace mounted at
`/Users/you/myproject`, the memory file lands at
`/Users/you/AGENTS.md` (or whatever `aiFilename` is set to). It exists
only inside the sandbox — nothing is written to the host.

## Debugging

When a kit doesn't behave as expected, start with the network policy log
and direct inspection inside the sandbox:

- `sbx policy log` shows every outbound request the sandbox proxy saw,
  the rule it matched, extra context when available, and its `PROXY`
  value, such as `forward`, `forward-bypass`, `transparent`, or
  `browser-open`. Use it to diagnose install-time download failures,
  blocked domains, and unexpected TLS interception. If downloads fail or
  arrive corrupted after you add `serviceDomains`, check whether the
  service mapping is too broad. Map only the hosts that need credential
  injection.
- `sbx exec <sandbox> -- <command>` runs an arbitrary command inside an
  existing sandbox. Useful for inspecting post-install state without
  recreating: `which mytool`, `ls /home/agent/.local/bin/`,
  `cat /home/agent/.config/...`, and so on.

Install and startup command output is only emitted during `sbx run` or
`sbx create`; `sbx` doesn't retain it for later inspection. To repeat
setup with fresh output, remove and recreate the sandbox:
`sbx rm <sandbox> && sbx run ...`.

diff --git a/content/manuals/ai/sandboxes/customize/templates.md b/content/manuals/ai/sandboxes/customize/templates.md
new file mode 100644
index 00000000000..25367ae0e0d
--- /dev/null
+++ b/content/manuals/ai/sandboxes/customize/templates.md
@@ -0,0 +1,236 @@
---
title: Templates
weight: 10
description: Build reusable sandbox images with tools and configuration baked in, or save a running sandbox as a template.
+keywords: sandboxes, sbx, templates, images, dockerfile, snapshot, custom environments +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +Every sandbox is customizable — agents install packages, pull images, and +configure tools as they work, and those changes persist for the sandbox's +lifetime. Templates capture a configured environment into a reusable image +so you don't have to set it up again every time. + +## Custom templates + +Custom templates are reusable sandbox images that extend one of the built-in +agent environments with additional tools and configuration baked in. Instead +of asking the agent to install packages every time, build a template once and +reuse it across sandboxes and team members. + +Templates make sense when multiple people need the same environment, when +setup involves steps that are tedious to repeat, or when you need pinned +versions of specific tools. For one-off work, the default image is fine — +ask the agent to install what's needed. + +> [!NOTE] +> Custom templates customize an existing agent's environment — they don't +> create new agent runtimes. The agent that launches inside the sandbox is +> determined by the base image variant you extend and the agent you specify +> in the `sbx run` command, not by binaries installed in the template. To +> define a new agent from scratch, see [Kits](kits.md#defining-an-agent). + +### Base images + +All sandbox templates are published as +`docker/sandbox-templates:`. They are based on Ubuntu and run as a +non-root `agent` user with sudo access. Most variants include Git, Docker +CLI, and common development tools like Node.js, Python, Go, and Java. + +| Variant | Agent | +| --------------------- | -------------------------------------------------------------------- | +| `claude-code` | [Claude Code](https://claude.ai/download) | +| `claude-code-minimal` | Claude Code with a minimal toolset (no Node.js, Python, Go, or Java) | +| `codex` | [OpenAI Codex](https://github.com/openai/codex) | +| `copilot` | [GitHub Copilot](https://github.com/github/copilot-cli) | +| `cursor-agent` | [Cursor](https://cursor.com/cli) | +| `docker-agent` | [Docker Agent](https://github.com/docker/docker-agent) | +| `droid` | [Droid](https://www.factory.ai) | +| `gemini` | [Gemini CLI](https://github.com/google-gemini/gemini-cli) | +| `kiro` | [Kiro](https://kiro.dev) | +| `opencode` | [OpenCode](https://opencode.ai) | +| `shell` | No agent pre-installed. Use for manual agent setup. | + +Each variant also has a `-docker` version (for example, `claude-code-docker`) +that includes a full Docker Engine running inside the sandbox — no local Docker +daemon required. When you pick a built-in agent without specifying a custom +template, `sbx run` and `sbx create` use the `-docker` template variants by +default. + +The agent containers created from the `-docker` templates run in privileged +mode inside the microVM (not on your host), with a dedicated block volume at +`/var/lib/docker`, and `dockerd` starts automatically inside the sandbox. The +block volume defaults to 50 GB and uses a sparse file, so it only consumes +disk space as Docker writes to it. + +To override the volume size, set the `DOCKER_SANDBOXES_DOCKER_SIZE` +environment variable to a size string before starting the sandbox: + +```console +$ DOCKER_SANDBOXES_DOCKER_SIZE=10g sbx run claude +``` + +Use the non-Docker variant if you don't need to build or run containers +inside the sandbox and want a lighter, non-privileged environment. 
Specify +it explicitly with `--template`: + +```console +$ sbx run claude --template docker.io/docker/sandbox-templates:claude-code +``` + +### Build a custom template + +Building a custom template requires +[Docker Desktop](https://docs.docker.com/desktop/). + +Write a Dockerfile that extends one of the base images. Pick the variant +that matches the agent you plan to run. For example, extend `claude-code` +to customize a Claude Code environment, or `codex` to customize an OpenAI +Codex environment. + +The following example creates a Claude Code template with Rust and +protocol buffer tools pre-installed: + +```dockerfile +FROM docker/sandbox-templates:claude-code +USER root +RUN apt-get update && apt-get install -y protobuf-compiler +USER agent +RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y +``` + +Use `root` for system-level package installations (`apt-get`), and switch +back to `agent` before installing user-level tools. Tools that install into +the home directory, such as `rustup`, `nvm`, or `pyenv`, must run as +`agent` — otherwise they install under `/root/` and aren't available in +the sandbox. + +Build the image and push it to an OCI registry, such as Docker Hub: + +```console +$ docker build -t my-org/my-template:v1 --push . +``` + +> [!NOTE] +> The Docker daemon used by Docker Sandboxes pulls templates from a +> registry directly; it doesn't share the image store of your local Docker +> daemon on the host. + +> [!IMPORTANT] +> Private templates are only supported on Docker Hub. `sbx` reuses your +> `sbx login` session to pull private images from Docker Hub. Other +> registries (such as GitHub Container Registry, ECR, or a self-hosted +> registry like Nexus) are pulled anonymously, so private images on those +> registries fail to pull. + +For locally-built images or private images on registries that `sbx` +can't authenticate against, save the image to a tar and load it +directly into the sandbox runtime instead of pulling from a registry: + +```console +$ docker image save my-org/my-template:v1 -o my-template.tar +$ sbx template load my-template.tar +$ sbx run --template my-org/my-template:v1 claude +``` + +`sbx template load` imports the tar into the sandbox runtime's image +store, so the image doesn't need to be reachable from a registry at +sandbox creation time. + +Unless you use the permissive `allow-all` network policy, you may also need +to allow-list any domains that your custom tools depend on: + +```console +$ sbx policy allow network "*.example.com:443,example.com:443" +``` + +Then run a sandbox with your template. The agent you specify must match +the base image variant your template extends: + +```console +$ sbx run --template docker.io/my-org/my-template:v1 claude +``` + +Because this template extends the `claude-code` base image, you run it +with `claude`. If you extend `codex`, use `codex`; if you extend `shell`, +use `shell` (which drops you into a Bash shell with no agent). + +> [!NOTE] +> Unlike Docker commands, `sbx` does not automatically resolve the Docker +> Hub domain (`docker.io`) in image references. + +### Template caching + +Template images are cached locally. The first use pulls from the registry; +subsequent sandboxes reuse the cache. Cached images persist across sandbox +creation and deletion, and are cleared when you run `sbx reset`. + +## Saving a sandbox as a template + +Instead of writing a Dockerfile, you can save a running sandbox's state as +a template. 
This captures installed packages, configuration changes, and +files into a reusable image — useful when you've set up an environment +interactively and want to preserve it. + +### Save and reuse + +Stop the sandbox (or let the CLI prompt you), then save it with a name and +tag: + +```console +$ sbx template save my-sandbox my-template:v1 +``` + +The image is stored in the sandbox runtime's local image store. Create a +new sandbox from it with the `-t` flag: + +```console +$ sbx run -t my-template:v1 claude +``` + +### List and remove templates + +List all saved templates: + +```console +$ sbx template ls +``` + +Remove a template you no longer need: + +```console +$ sbx template rm my-template:v1 +``` + +### Export and import + +To share a saved template or move it to another machine, export it as a +tar file: + +```console +$ sbx template save my-sandbox my-template:v1 --output my-template.tar +``` + +On the other machine, load the tar file and use it: + +```console +$ sbx template load my-template.tar +$ sbx run -t my-template:v1 claude +``` + +### Limitations + +Agent configuration files are always recreated when a sandbox is created. +Changes to user-level agent configuration files, such as +`/home/agent/.claude/settings.json` and `/home/agent/.claude.json`, do not +persist in saved templates. + +If the saved template was built for a different agent than the one you +specify in `sbx run`, you get a warning. For example, saving a Claude +sandbox and running it with `codex` produces: + +```text +⚠ WARNING: template "my-template:v1" was built for the "claude" agent but you are using "codex". + The sandbox may not work correctly. Consider using: sbx run -t my-template:v1 claude +``` diff --git a/content/manuals/ai/sandboxes/docker-desktop.md b/content/manuals/ai/sandboxes/docker-desktop.md new file mode 100644 index 00000000000..57a07f97a74 --- /dev/null +++ b/content/manuals/ai/sandboxes/docker-desktop.md @@ -0,0 +1,238 @@ +--- +title: Docker Desktop sandboxes (deprecated) +linkTitle: Docker Desktop +description: Run sandboxed AI coding agents using Docker Desktop and the docker sandbox CLI. +keywords: docker desktop, docker sandbox, ai agents, sandboxes, deprecated +weight: 80 +sitemap: false +notoc: true +--- + +> [!WARNING] +> +> The Docker Desktop-integrated `docker sandbox` commands are deprecated and +> replaced by the standalone [`sbx` CLI](/manuals/ai/sandboxes/_index.md). This +> deprecation applies only to the Docker Desktop integration, not to Docker +> Sandboxes. + +This page covers the Docker Desktop-integrated `docker sandbox` command for +running AI coding agents in isolated microVMs. This integration is superseded +by the standalone `sbx` CLI, which provides the full Docker Sandboxes workflow +and doesn't require Docker Desktop. + +> [!NOTE] +> Use the standalone `sbx` CLI for sandboxed AI agent workflows. + +## Prerequisites + +- Docker Desktop 4.58 or later +- macOS or Windows +- API keys for your chosen agent + +## Quick start + +1. Set your API key in your shell configuration file: + + ```plaintext {title="~/.bashrc or ~/.zshrc"} + export ANTHROPIC_API_KEY=sk-ant-api03-xxxxx + ``` + + Source your shell configuration and restart Docker Desktop so the daemon + picks up the variable. + +2. Create and run a sandbox: + + ```console + $ cd ~/my-project + $ docker sandbox run claude + ``` + + The first run takes longer while Docker initializes the microVM. + +Replace `claude` with a different [agent](#supported-agents) if needed. 
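You can also point the sandbox at a project directory explicitly instead of
running the command from inside it — for example, to start Codex against a
project at `~/my-project` (the agent and path here are illustrative):

```console
$ docker sandbox run codex ~/my-project
```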
+ +## Supported agents + +| Agent | Command | Notes | +| --------------------------------- | -------------- | ------------------------------------ | +| Claude Code | `claude` | Most tested implementation | +| Codex | `codex` | | +| Copilot | `copilot` | | +| Gemini | `gemini` | | +| [Docker Agent](/ai/docker-agent/) | `docker-agent` | Also available as a standalone tool | +| Kiro | `kiro` | | +| OpenCode | `opencode` | | +| Custom shell | `shell` | Minimal environment for manual setup | + +The agent type is specified when creating a sandbox and can't be changed later. + +## Authentication + +Each agent requires its own API key or credentials. Docker Sandboxes uses a +daemon that doesn't inherit environment variables from your shell session, so +you must set keys in your shell configuration file (not just export them in +your terminal). + +Common environment variables by agent: + +| Agent | Environment variable(s) | +| ------------ | ------------------------------------------------- | +| Claude Code | `ANTHROPIC_API_KEY` | +| Codex | `OPENAI_API_KEY` | +| Copilot | `GH_TOKEN` or `GITHUB_TOKEN` | +| Gemini | `GEMINI_API_KEY` or `GOOGLE_API_KEY` | +| Docker Agent | `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, and others | +| OpenCode | `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, and others | +| Kiro | Device flow (interactive browser login) | +| Shell | Any provider keys needed | + +After setting variables, source your shell configuration and restart Docker +Desktop. The sandbox proxy injects credentials into API requests so keys stay +on your host and are never stored inside the sandbox. + +## Commands + +```console +$ docker sandbox run AGENT [PATH] # Create and run +$ docker sandbox ls # List sandboxes +$ docker sandbox exec -it bash # Shell into a sandbox +$ docker sandbox rm # Remove a sandbox +$ docker sandbox reset # Remove all sandboxes +$ docker sandbox network proxy --policy … # Set network policy +$ docker sandbox network log # View network log +``` + +Sandboxes don't appear in `docker ps` because they're microVMs, not +containers. For the full command reference, see the +[CLI reference](/reference/cli/docker/sandbox/). + +Pass agent-specific CLI options after the sandbox name with a `--` separator: + +```console +$ docker sandbox run -- --continue +``` + +## Architecture + +Each sandbox is a lightweight microVM with its own kernel, using your system's +native virtualization (macOS virtualization.framework, Windows Hyper-V). The +default agent templates include a private Docker daemon, so `docker build` and +`docker compose up` run inside the sandbox without affecting your host. + +```plaintext +Host system + ├── Your containers and images + ├── Sandbox VM 1 + │ ├── Docker daemon (isolated) + │ ├── Agent container + │ └── Containers created by agent + └── Sandbox VM 2 + ├── Docker daemon (isolated) + └── Agent container +``` + +Your workspace syncs bidirectionally between host and sandbox at the same +absolute path. Outbound internet goes through an HTTP/HTTPS filtering proxy on +the host. See [Network policies](#network-policies) for configuration. + +## Network policies + +The filtering proxy controls what a sandbox can access. By default, all +traffic is allowed except private networks and localhost. 
+ +Allow mode (block specific destinations): + +```console +$ docker sandbox network proxy my-sandbox \ + --policy allow \ + --block-cidr 10.0.0.0/8 +``` + +Deny mode (allow specific destinations): + +```console +$ docker sandbox network proxy my-sandbox \ + --policy deny \ + --allow-host api.anthropic.com \ + --allow-host "*.npmjs.org" +``` + +View what an agent is accessing: + +```console +$ docker sandbox network log +``` + +## Custom templates + +Build custom templates to pre-install tools: + +```dockerfile +FROM docker/sandbox-templates:claude-code + +USER root +RUN apt-get update && apt-get install -y build-essential \ + && rm -rf /var/lib/apt/lists/* +USER agent +``` + +```console +$ docker build -t my-template:v1 . +$ docker sandbox run -t my-template:v1 claude ~/project +``` + +## Base environment + +All agent templates share a common environment: + +- Ubuntu 25.10 +- Docker CLI (with Buildx and Compose), Git, GitHub CLI, Node.js, Go, Python 3, uv, make, jq, ripgrep +- Non-root `agent` user with sudo access +- Package managers: apt, pip, npm + +## Troubleshooting + + + +### 'sandbox' is not a docker command + + + +The CLI plugin isn't installed or isn't in the correct location. Verify the +plugin exists at `~/.docker/cli-plugins/docker-sandbox` and restart Docker +Desktop. + +### Beta features need to be enabled + +If your Docker Desktop is managed by an administrator with +[Settings Management](/enterprise/security/hardened-desktop/settings-management/), +ask them to +[allow beta features](/enterprise/security/hardened-desktop/settings-management/configure-json-file/#beta-features). + +### Authentication failure + +Verify your API key is valid and set in your shell configuration file (not +just exported in the current session). Source the file and restart Docker +Desktop. + +### Permission denied on workspace files + +Check **Docker Desktop** > **Settings** > **Resources** > **File Sharing** and +ensure your workspace path is listed. Verify file permissions with `ls -la`. + +### Sandbox crashes on Windows + +If launching multiple sandboxes causes crashes, end all `docker.openvmm.exe` +processes in Task Manager and restart Docker Desktop. Launch sandboxes one at a +time. + +### Persistent issues + +Reset all sandbox state: + +```console +$ docker sandbox reset +``` + +This stops all VMs and deletes all sandbox data. Create fresh sandboxes +afterward. diff --git a/content/manuals/ai/sandboxes/faq.md b/content/manuals/ai/sandboxes/faq.md new file mode 100644 index 00000000000..e2d9b0e2b8e --- /dev/null +++ b/content/manuals/ai/sandboxes/faq.md @@ -0,0 +1,162 @@ +--- +title: FAQ +weight: 70 +description: Frequently asked questions about Docker Sandboxes. +keywords: docker sandboxes, sbx, faq, sign in, telemetry +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +## Why do I need to sign in? + +Docker Sandboxes is built around the idea that you and your agents are a team. +Signing in gives each sandbox a verified identity, which lets Docker: + +- **Tie sandboxes to a real person.** Governance matters when agents can build + containers, install packages, and push code. Your Docker identity is the + anchor. +- **Enable team features.** Team-scale features like + [organization governance](security/governance.md), shared environments, and + audit logs need a concept of "who," and adding that later would be worse for + everyone. +- **Authenticate against Docker infrastructure.** Sandboxes pull images, run + daemons, and talk to Docker services. 
A Docker account makes that seamless. + +Your Docker account email is only used for authentication, not marketing. + +## Can I enforce sandbox policies across my organization? + +Yes. Admins can centrally manage network and filesystem policies from the +Docker Admin Console. Rules defined there apply to every sandbox in the +organization and take precedence over local rules set with `sbx policy`. +Admins can optionally delegate specific rule types back to local control so +developers can add additional allow rules. + +See [Organization governance](security/governance.md). This feature requires +a separate paid subscription — +[contact Docker Sales](https://www.docker.com/products/ai-governance/#contact-sales) +to get started. + +## Does the CLI collect telemetry? + +The `sbx` CLI collects basic usage data about CLI invocations: + +- Which command you ran +- Whether it succeeded or failed +- How long it took +- If you're signed in, your Docker username is included + +Docker Sandboxes doesn't monitor sessions, read your prompts, or access your +code. Your code stays in the sandbox and on your host. + +To opt out of all analytics, set the `SBX_NO_TELEMETRY` environment variable: + +```console +$ export SBX_NO_TELEMETRY=1 +``` + +## How do I set custom environment variables inside a sandbox? + +The [`sbx secret`](/reference/cli/sbx/secret/) command only supports a fixed set +of [services](security/credentials.md#built-in-services) (Anthropic, OpenAI, +GitHub, and others). If your agent needs an environment variable that isn't +tied to a supported service, such as `BRAVE_API_KEY` or a custom internal +token, write it to `/etc/sandbox-persistent.sh` inside the sandbox. This +file is sourced on every shell login, so the variable persists across agent +sessions for the sandbox's lifetime. + +Use `sbx exec` to append the export: + +```console +$ sbx exec -d bash -c "echo 'export BRAVE_API_KEY=your_key' >> /etc/sandbox-persistent.sh" +``` + +The `bash -c` wrapper is required so the `>>` redirect runs inside the +sandbox instead of on your host. + +> [!NOTE] +> Unlike `sbx secret`, which injects credentials through a host-side proxy +> without exposing them to the agent, this approach stores the value inside +> the sandbox. The agent process can read it directly. Only use this for +> credentials where proxy-based injection isn't available. + +Variables in `/etc/sandbox-persistent.sh` are sourced automatically when +bash runs inside the sandbox, including interactive sessions and agents +started with `sbx run`. If you run a command directly with +`sbx exec `, the command runs without a shell, so the +persistent environment file is not sourced. Wrap the command in `bash -c` +to load the environment: + +```console +$ sbx exec bash -c "your-command" +``` + +To verify the variable is set, open a shell in the sandbox: + +```console +$ sbx exec -it bash +$ echo $BRAVE_API_KEY +``` + +## Why do agents run without approval prompts? + +The sandbox itself is the safety boundary. Because agents run inside an +isolated microVM with [network policies](security/policy.md), +[credential isolation](security/credentials.md), and no access to your host +system outside the workspace, the usual reasons for approval prompts (preventing +destructive commands, network access, file modifications) are handled by the +sandbox isolation layers instead. + +If you prefer to re-enable approval prompts, change the permission mode +inside the session. Most agents let you switch permission modes after +startup. 
In Claude Code, use the `/permissions` command to change the mode +interactively. + +To make approval prompts the default for every session, define a custom +agent kit that overrides the agent's entrypoint to drop the +permission-skipping flag. For example, a kit that launches Claude Code +without `--dangerously-skip-permissions`: + +```yaml {title="claude-safe/spec.yaml"} +schemaVersion: "1" +kind: agent +name: claude-safe +agent: + image: "docker/sandbox-templates:claude-code-docker" + entrypoint: + run: [claude] +``` + +Run it with `sbx run claude-safe --kit ./claude-safe/`. See +[Agent kits](customize/kits.md#agent-kits) for the full pattern. + +## How do I know if my agent is running in a sandbox? + +Ask the agent. The agent can see whether or not it's running inside a sandbox. +In Claude Code, use the `/btw` slash command to ask without interrupting an +in-progress task: + +```text +/btw are you running in a sandbox? +``` + +## Why doesn't the sandbox use my user-level agent configuration? + +Sandboxes don't pick up user-level agent configuration from your host. This +includes directories like `~/.claude` for Claude Code or `~/.codex` for Codex, +where hooks, skills, and other settings are stored. Only project-level +configuration in the working directory is available inside the sandbox. + +To make configuration available in a sandbox, copy or move what you need into +your project directory before starting a session: + +```console +$ cp -r ~/.claude/skills .claude/skills +``` + +Don't use symlinks — a sandboxed agent can't follow symlinks to paths outside +the sandbox. + +Collocating skills and other agent configuration with the project itself is a +good practice regardless of sandboxes. It's versioned alongside the code and +evolves with the project as it changes. diff --git a/content/manuals/ai/sandboxes/get-started.md b/content/manuals/ai/sandboxes/get-started.md new file mode 100644 index 00000000000..337fc159cda --- /dev/null +++ b/content/manuals/ai/sandboxes/get-started.md @@ -0,0 +1,270 @@ +--- +title: Get started with Docker Sandboxes +linkTitle: Get started +weight: 10 +description: Install the sbx CLI, configure credentials, and work through your first sandbox session. +keywords: sandbox, sbx, get started, install, credentials, branch mode, network policy +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +Docker Sandboxes run AI coding agents in isolated microVM sandboxes. Each +sandbox gets its own Docker daemon, filesystem, and network — the agent can +build containers, install packages, and modify files without touching your host +system. + +This page walks through a typical first session: installing the CLI, +authenticating your agent, running a sandbox, working with branches, and +cleaning up. + +## Prerequisites + +{{< tabs group="os" >}} +{{< tab name="macOS" >}} + +- macOS Tahoe (26) or later +- Apple silicon + +{{< /tab >}} +{{< tab name="Windows" >}} + +- 64-bit Intel or AMD (x86_64) +- Windows 11 +- Windows Hypervisor Platform enabled. Open an elevated PowerShell prompt (Run + as Administrator) and run: + ```powershell + Enable-WindowsOptionalFeature -Online -FeatureName HypervisorPlatform -All + ``` + +{{< /tab >}} +{{< tab name="Linux (Ubuntu)" >}} + +- Ubuntu 24.04 or later +- 64-bit Intel or AMD (x86_64) +- KVM hardware virtualization supported and enabled by the CPU. If you're + running inside a VM, nested virtualization must be turned on. 
Verify that KVM + is available: + ```console + $ lsmod | grep kvm + ``` + A working setup shows `kvm_intel` or `kvm_amd` in the output. If the output + is empty, run `kvm-ok` for diagnostics. If KVM is unavailable, `sbx` will + not start. +- Your user in the `kvm` group: + ```console + $ sudo usermod -aG kvm $USER + ``` + Log out and back in (or run `newgrp kvm`) for the group change to take effect. + +{{< /tab >}} +{{< /tabs >}} + +An API key or authentication method for the agent you want to use. Most agents +require an API key for their model provider (Anthropic, OpenAI, Google, and +others). See the [agent pages](agents/) for provider-specific instructions. + +Docker Desktop is not required to use `sbx`. + +## Install and sign in + +{{< tabs group="os" >}} +{{< tab name="macOS" >}} + +```console +$ brew install docker/tap/sbx +$ sbx login +``` + +{{< /tab >}} +{{< tab name="Windows" >}} + +```powershell +> winget install -h Docker.sbx +> sbx login +``` + +{{< /tab >}} +{{< tab name="Linux (Ubuntu)" >}} + +```console +$ curl -fsSL https://get.docker.com | sudo REPO_ONLY=1 sh +$ sudo apt-get install docker-sbx +$ sbx login +``` + +The first command adds Docker's `apt` repository to your system. + +{{< /tab >}} +{{< /tabs >}} + +If you need to install `sbx` manually, download a binary directly from the +[sbx-releases](https://github.com/docker/sbx-releases/releases) repository. + +`sbx login` opens a browser for Docker OAuth. On first login (and after `sbx +policy reset`), the CLI prompts you to choose a default network policy for your +sandboxes: + +```plaintext +Choose a default network policy: + + 1. Open — All network traffic allowed, no restrictions. + 2. Balanced — Default deny, with common dev sites allowed. + 3. Locked Down — All network traffic blocked unless you allow it. + +Use ↑/↓ to navigate, Enter to select, or press 1–3. +``` + +**Balanced** is a good starting point — it permits traffic to common +development services while blocking everything else. You can adjust individual +rules later. See [Policies](security/policy.md) for a full description of each +option. + +> [!NOTE] +> See the [FAQ](faq.md) for details on why sign-in is required and what +> happens with your data. + +## Authenticate your agent + +Agents need credentials for their model provider. How you provide them depends +on the agent. + +For Claude Code with a Claude subscription (Max, Team, or Enterprise), no +upfront setup is needed — use the `/login` command inside the sandbox to sign +in with OAuth. The session token stays on your host and is injected by a +proxy, not stored inside the sandbox. + +For agents that use API keys (or if you prefer API key authentication for +Claude Code), store the key before starting a sandbox: + +```console +$ sbx secret set -g anthropic +``` + +This prompts for the secret value and stores it in your OS keychain. A proxy on +your host injects the key into outbound API requests so it's never exposed +inside the sandbox. See [Credentials](security/credentials.md) for details on +scoping, supported services, and alternative methods. + +To give the agent access to GitHub for creating pull requests or interacting +with repositories: + +```console +$ sbx secret set -g github -t "$(gh auth token)" +``` + +## Run your first sandbox + +Pick a project directory and launch an agent with +[`sbx run`](/reference/cli/sbx/run/): + +```console +$ cd ~/my-project +$ sbx run claude +``` + +Replace `claude` with the agent you want to use — see [Agents](agents/) for the +full list. 
+ +The first run takes a little longer while the agent image is pulled. Subsequent +runs reuse the cached image and start in seconds. + +You can check what's running at any time: + +```console +$ sbx ls +SANDBOX AGENT STATUS PORTS WORKSPACE +claude-my-project claude running ~/my-project +``` + +You can also run `sbx` with no arguments to open an interactive dashboard. +The dashboard shows your sandboxes with live status, lets you attach to +agents, open shells, and manage network rules from one place. See +[Interactive mode](usage.md#interactive-mode) for details. + +![The interactive dashboard showing sandbox status, resource usage, and network governance controls.](images/sbx-dashboard.png) + +## Use branch mode + +By default, the agent edits your working tree directly. To give it its own +Git branch, use `--branch`: + +```console +$ sbx run claude --branch my-feature +``` + +This creates a [Git worktree](https://git-scm.com/docs/git-worktree) under +`.sbx/` in your repository root. The agent works on its own branch and +directory without touching your main working tree. + +When the session ends, review what the agent did from the worktree: + +```console +$ cd .sbx/-worktrees/my-feature +$ git log +$ git diff main +``` + +If you're satisfied, push the branch and open a pull request: + +```console +$ git push -u origin my-feature +$ gh pr create +``` + +Branch mode is especially useful when running multiple agents on the same +repository — each gets its own branch and can't overwrite the other's changes. +See [Branch mode](usage.md#branch-mode) for more options, including +`--branch auto` and multiple branches per sandbox. + +## Manage network access + +Your network policy controls what the sandbox can reach. If the agent fails to +connect to an API or service, it's likely blocked by the policy. + +Check which rules are in effect: + +```console +$ sbx policy ls +``` + +To allow a specific host: + +```console +$ sbx policy allow network registry.npmjs.org +``` + +With **Locked Down**, even your model provider API is blocked unless you +explicitly allow it. With **Balanced**, common development services are +permitted by default. See [Policies](security/policy.md) for the full rule +set and how to customize it. + +## Clean up + +Sandboxes persist after the agent exits. To stop a sandbox without deleting it: + +```console +$ sbx stop my-sandbox +``` + +Installed packages, Docker images, and configuration changes are preserved +across restarts. When you're done with a sandbox, remove it to reclaim disk +space: + +```console +$ sbx rm my-sandbox +``` + +Removing a sandbox deletes everything inside it — installed packages, Docker +images, and any branch mode worktrees under `.sbx/`. Files in your main +working tree are unaffected. 
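To wipe all sandbox state at once — every sandbox, cached template images,
and stored secrets — use `sbx reset` instead of removing sandboxes one by
one:

```console
$ sbx reset
```

You'll need to store your secrets again after a reset.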
+ +## Next steps + +- [Usage guide](usage.md) — sandbox management, reconnecting, multiple + workspaces, port forwarding, and more +- [Agents](agents/) — supported agents and configuration +- [Customize](customize/) — build reusable templates or declare capabilities + with kits +- [Credentials](security/credentials.md) — credential storage and management +- [Workspace trust](security/workspace.md) — review agent changes safely +- [Policies](security/policy.md) — control outbound access diff --git a/content/manuals/ai/sandboxes/images/sbx-dashboard.png b/content/manuals/ai/sandboxes/images/sbx-dashboard.png new file mode 100644 index 00000000000..fb454c1d45c Binary files /dev/null and b/content/manuals/ai/sandboxes/images/sbx-dashboard.png differ diff --git a/content/manuals/ai/sandboxes/images/sbx-security.png b/content/manuals/ai/sandboxes/images/sbx-security.png new file mode 100644 index 00000000000..0f86c436c50 Binary files /dev/null and b/content/manuals/ai/sandboxes/images/sbx-security.png differ diff --git a/content/manuals/ai/sandboxes/security/_index.md b/content/manuals/ai/sandboxes/security/_index.md new file mode 100644 index 00000000000..12a2cb72214 --- /dev/null +++ b/content/manuals/ai/sandboxes/security/_index.md @@ -0,0 +1,106 @@ +--- +title: Security model +linkTitle: Security model +weight: 50 +description: Trust boundaries, isolation layers, and security properties of Docker Sandboxes. +keywords: docker sandboxes, security model, isolation, trust boundaries, microVM +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +Docker Sandboxes run AI agents in microVMs so they can execute code, install +packages, and use tools without accessing your host system. Multiple isolation +layers protect your host system. + +## Trust boundaries + +The primary trust boundary is the microVM. The agent has full control inside +the VM, including sudo access. The VM boundary prevents the agent from reaching +anything on your host except what is explicitly shared. + +What crosses the boundary into the VM: + +- **Workspace directory:** mounted into the VM with read-write access. With + the default direct mount, changes the agent makes appear on your host + immediately. +- **Credentials:** the host-side proxy injects authentication headers into + outbound HTTP requests. The raw credential values never enter the VM. +- **Network access:** HTTP and HTTPS requests to + [allowed domains](defaults/) are proxied through the host. + +What crosses the boundary back to the host: + +- **Workspace file changes:** visible on your host in real time with the + default direct mount. +- **HTTP/HTTPS requests:** sent to allowed domains through the host proxy. + +Everything else is blocked. The agent cannot access your host filesystem +(outside the workspace), your host Docker daemon, your host network or +localhost, other sandboxes, or any domain not in the allow list. Raw TCP, UDP, +and ICMP are blocked at the network layer. + +![Sandbox security model showing the hypervisor boundary between the sandbox VM and the host system. The workspace directory is shared read-write. The agent process, Docker Engine, packages, and VM filesystem are inside the VM. Host filesystem, processes, Docker Engine, and network are outside the VM and not accessible. A proxy enforces allow/deny policies and injects credentials into outbound requests.](../images/sbx-security.png) + +## Isolation layers + +The sandbox security model has four layers. See +[Isolation layers](isolation/) for technical details on each. 
+ +- **Hypervisor isolation:** separate kernel per sandbox. No shared memory or + processes with the host. +- **Network isolation:** all HTTP/HTTPS traffic proxied through the host. + [Deny-by-default policy](defaults/). Non-HTTP protocols blocked entirely. +- **Docker Engine isolation:** each sandbox has its own Docker Engine with no + path to the host daemon. +- **Credential isolation:** API keys are injected into HTTP headers by the + host-side proxy. Credential values never enter the VM. + +## What the agent can do inside the sandbox + +Inside the VM, the agent has full privileges: sudo access, package installation, +a private Docker Engine, and read-write access to the workspace. Installed +packages, Docker images, and other VM state persist across restarts. See +[Default security posture](defaults/) for the full breakdown of what is +permitted and what is blocked. + +## What is not isolated by default + +The sandbox isolates the agent from your host system, but the agent's actions +can still affect you through the shared workspace and allowed network channels. + +**Workspace changes are live on your host.** The agent edits the same files you +see on your host. This includes files that execute implicitly during normal +development: Git hooks, CI configuration, IDE task configs, `Makefile`, +`package.json` scripts, and similar build files. Review changes before running +any modified code. Note that Git hooks live inside `.git/` and do not appear in +`git diff` output. Check them separately. +See [Workspace trust](workspace/). + +**Default allowed domains include broad wildcards.** Some defaults like +`*.googleapis.com` cover many services beyond AI APIs. Run `sbx policy ls` to +see the full list of active rules, and remove entries you don't need. See +[Default security posture](defaults/). + +## Organization-wide control + +On a single developer's machine, network and filesystem policies are +configured locally with `sbx policy`. Admins can also centrally define those +policies in the Docker Admin Console. When organization governance is active, +the centrally defined rules apply uniformly across every sandbox in the +organization and take precedence over local rules. Admins can optionally +delegate specific rule types back to local control so developers can add +additional allow rules. + +See [Organization governance](governance/) for details. + +## Learn more + +- [Isolation layers](isolation/): how hypervisor, network, Docker, and + credential isolation work +- [Default security posture](defaults/): what a fresh sandbox permits and + blocks +- [Credentials](credentials/): how to provide and manage API keys +- [Policies](policy/): how to customize network access rules +- [Organization governance](governance/): centrally manage policies across + an organization +- [Workspace trust](workspace/): what to review after an agent session diff --git a/content/manuals/ai/sandboxes/security/credentials.md b/content/manuals/ai/sandboxes/security/credentials.md new file mode 100644 index 00000000000..9def46167b1 --- /dev/null +++ b/content/manuals/ai/sandboxes/security/credentials.md @@ -0,0 +1,243 @@ +--- +title: Credentials +weight: 20 +description: How Docker Sandboxes handle API keys and authentication credentials for sandboxed agents. +keywords: docker sandboxes, credentials, api keys, authentication, proxy, ssh agent, secrets +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +Most agents need an API key for their model provider. 
An HTTP/HTTPS proxy on +your host intercepts outbound requests from the sandbox, looks up the matching +credential on the host, and overwrites the auth header before forwarding. The +real credential stays on the host; the sandbox sees only a sentinel value. For +the security model behind this, see +[Credential isolation](isolation.md#credential-isolation). + +## How credential injection works + +The proxy needs three things to inject a credential: which outbound traffic to +match, what header to write, and what value to use. The kit (or built-in agent +definition) declares the first two. You provide the value on the host. + +There are two host-side stores, plus a host shell fallback: + +- Stored secrets, keyed on a service identifier: built-in agents declare + service identifiers (`anthropic`, `openai`, `github`, etc.) in their kit + specs; custom kits can declare their own. `sbx secret set` stores a value + keyed on that identifier. When a sandboxed request matches a service's + domain, the proxy reads the stored value and writes the configured header. + Inside the sandbox, the environment variable holds a sentinel like + `proxy-managed`, so SDKs that read the variable see something non-empty + without seeing the real secret. See [Stored secrets](#stored-secrets). + +- Stored secrets, keyed on a target domain and environment variable name: + `sbx secret set-custom` stores a value alongside a target domain, an + environment variable name, and an optional placeholder. The sandbox sees + the placeholder; the proxy substitutes it with the real value anywhere it + appears in outbound traffic to that domain. Use this when the + service-identifier model doesn't fit — for example, when the agent + validates the variable format at boot, or when the credential lands in a + request body. See [Custom secrets](#custom-secrets). + +- Host shell environment variables: as a fallback, the proxy reads from your + shell environment. Useful for one-off testing or development; stored + secrets are preferred because shell environment variables are plaintext + and visible to other processes running as your user. See + [Environment variables](#environment-variables). + +If both a stored secret and a host environment variable are set for the same +service, the stored secret takes precedence. For multi-provider agents +(OpenCode, Docker Agent), the proxy selects credentials based on the API +endpoint being called. See individual [agent pages](../agents/) for +provider-specific details. + +## Stored secrets + +`sbx secret set` stores credentials in your OS keychain, keyed on a service +identifier. Built-in agents declare a fixed set of services. Custom kits can +declare their own. The same `sbx secret set` flow works for both. + +### Store a secret + +```console +$ sbx secret set -g anthropic +``` + +This prompts you for the secret value interactively. The `-g` flag stores the +secret globally so it's available to all sandboxes. To scope a secret to a +specific sandbox instead: + +```console +$ sbx secret set my-sandbox openai +``` + +> [!NOTE] +> A sandbox-scoped secret takes effect immediately, even if the sandbox is +> running. A global secret (`-g`) only applies when a sandbox is created. If +> you set or change a global secret while a sandbox is running, recreate the +> sandbox for the new value to take effect. 
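As a sketch of that recreate flow (the sandbox name and agent are illustrative), you might run:

```console
$ sbx secret set -g anthropic
$ sbx rm my-sandbox
$ sbx run claude
```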
+ +You can also pipe in a value for non-interactive use: + +```console +$ echo "$ANTHROPIC_API_KEY" | sbx secret set -g anthropic +``` + +### Built-in services + +Each built-in service name maps to a set of environment variables the proxy +checks and the API domains it authenticates requests to: + +| Service | Environment variables | API domains | +| ----------- | ---------------------------------- | ----------------------------------- | +| `anthropic` | `ANTHROPIC_API_KEY` | `api.anthropic.com` | +| `aws` | `AWS_ACCESS_KEY_ID` | AWS Bedrock endpoints | +| `github` | `GH_TOKEN`, `GITHUB_TOKEN` | `api.github.com`, `github.com` | +| `google` | `GEMINI_API_KEY`, `GOOGLE_API_KEY` | `generativelanguage.googleapis.com` | +| `groq` | `GROQ_API_KEY` | `api.groq.com` | +| `mistral` | `MISTRAL_API_KEY` | `api.mistral.ai` | +| `nebius` | `NEBIUS_API_KEY` | `api.studio.nebius.ai` | +| `openai` | `OPENAI_API_KEY` | `api.openai.com` | +| `xai` | `XAI_API_KEY` | `api.x.ai` | + +When you store a secret with `sbx secret set -g `, the proxy uses it +the same way it would use the corresponding environment variable. You don't +need to set both. + +### Services declared by kits + +Custom kits can declare their own service identifiers in `spec.yaml` — +they're not limited to the table above. To provide a credential for a +kit-declared service, run `sbx secret set` with the same identifier the kit +declares under `credentials.sources`: + +```console +$ sbx secret set -g my-service +``` + +There's no separate registration step; the keychain entry is keyed on the +identifier the kit already uses. See +[Authenticate to external services](../customize/kits.md#authenticate-to-external-services) +for the kit-side wiring. + +### List and remove secrets + +List all stored secrets: + +```console +$ sbx secret ls +SCOPE SERVICE SECRET +(global) github gho_GCaw4o****...****43qy +``` + +Remove a secret: + +```console +$ sbx secret rm -g github +``` + +> [!NOTE] +> Running `sbx reset` deletes all stored secrets along with all sandbox state. +> You'll need to re-add your secrets after a reset. + +### GitHub token + +The `github` service gives the agent access to the `gh` CLI inside the +sandbox. Pass your existing GitHub CLI token: + +```console +$ echo "$(gh auth token)" | sbx secret set -g github +``` + +This is useful for agents that create pull requests, open issues, or interact +with GitHub APIs on your behalf. + +### SSH agent + +If your host has an SSH agent and `SSH_AUTH_SOCK` is set, Docker Sandboxes +forwards the agent into the sandbox and sets `SSH_AUTH_SOCK` there. The +private keys stay on your host. Processes inside the sandbox can request +signatures from the forwarded agent, but they can't read or copy the private +key. + +Use SSH agent forwarding for Git operations over SSH and SSH-based commit +signing. The signing key must be loaded in the host SSH agent for sandboxed +commit signing to work. Outbound SSH connections are still subject to sandbox +network policy. For details, see +[Signed commits](../usage.md#signed-commits). + +## Custom secrets + +> [!IMPORTANT] +> Custom secrets are experimental. The `set-custom` command is hidden +> from `sbx --help`, and behavior, flags, and the placeholder format may +> change. + +For credentials that don't fit the service-identifier model — for example, +when an agent validates the environment variable format at boot, or when the +credential lands in a request body rather than a header — use +`sbx secret set-custom`. 
The secret is keyed on a target domain, an +environment variable name, and an optional placeholder string, instead of a +service identifier. + +```console +$ sbx secret set-custom -g \ + --host api.example.com \ + --env API_KEY \ + --value +``` + +> [!WARNING] +> Passing the secret as `--value ` records it in your shell history +> and exposes it to other processes running as your user. Avoid pasting +> real credentials inline — read the value from a variable that's already +> in your environment, and clear shell history if a real secret was passed +> on the command line. + +Inside the sandbox, `API_KEY` is set to a generated placeholder (for example, +`sbx-cs-`). When a sandboxed process sends a request to +`api.example.com` and the placeholder appears anywhere in the request, the +proxy replaces it with the real value. The agent never sees the real secret. + +Prefer the [service-based flow](#stored-secrets) whenever it's an option — +the kit handles the wiring; you only provide the value. + +## Environment variables + +As an alternative to stored secrets, export the relevant environment variable +in your shell before running a sandbox: + +```console +$ export ANTHROPIC_API_KEY=sk-ant-api03-xxxxx +$ sbx run claude +``` + +The proxy reads the variable from your terminal session. See individual +[agent pages](../agents/) for the variable names each agent expects. + +> [!NOTE] +> These environment variables are set on your host, not inside the sandbox. +> Sandbox agents are pre-configured to use credentials managed by the +> host-side proxy. For custom environment variables not tied to a +> [built-in service](#built-in-services), see +> [Setting custom environment variables](../faq.md#how-do-i-set-custom-environment-variables-inside-a-sandbox). + +## Best practices + +- Use [stored secrets](#stored-secrets) over environment variables. The OS + keychain encrypts credentials at rest and controls access, while environment + variables are plaintext in your shell. +- Don't set API keys manually inside the sandbox. Sandbox agents are + pre-configured to use proxy-managed credentials. +- For Claude Code and Codex, OAuth is another secure option: the flow runs on + the host, so the token is never exposed inside the sandbox. For Claude Code, + use `/login` inside the agent. For Codex, run `sbx secret set -g openai --oauth`. + +## Custom templates and placeholder values + +When building custom templates or installing agents manually in a shell +sandbox, some agents require environment variables like `OPENAI_API_KEY` to be +set before they start. Set these to placeholder values (e.g. `proxy-managed`) +if needed. The proxy injects actual credentials regardless of the environment +variable value. diff --git a/content/manuals/ai/sandboxes/security/defaults.md b/content/manuals/ai/sandboxes/security/defaults.md new file mode 100644 index 00000000000..fa7c26869a4 --- /dev/null +++ b/content/manuals/ai/sandboxes/security/defaults.md @@ -0,0 +1,71 @@ +--- +title: Default security posture +linkTitle: Defaults +weight: 15 +description: What a sandbox permits and blocks before you change any settings. +keywords: docker sandboxes, security defaults, network policy, credentials, sbx +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +A sandbox created with `sbx run` and no additional flags has the following +security posture. + +## Network defaults + +All outbound HTTP and HTTPS traffic is blocked unless an explicit rule allows +it (deny-by-default). 
All non-HTTP protocols (raw TCP, UDP including DNS, and +ICMP) are blocked at the network layer. Traffic to private IP ranges, loopback +addresses, and link-local addresses is also blocked. + +Run `sbx policy ls` to see the active allow rules for your installation. To +customize network access, see [Policies](policy.md). If your organization +manages sandbox policies centrally, those rules apply on top of the defaults +described here. See [Organization governance](governance.md). + +## Workspace defaults + +Sandboxes use a direct mount by default. The agent sees and modifies your +working tree directly, and changes appear on your host immediately. + +The agent can read, write, and delete any file within the workspace directory, +including hidden files, configuration files, build scripts, and Git hooks. +See [Workspace trust](workspace.md) for what to review after an agent session. + +## Credential defaults + +No credentials are available to the sandbox unless you provide them using +`sbx secret` or environment variables. When credentials are provided, the +host-side proxy injects them into outbound HTTP headers. The agent cannot +read the raw credential values. + +See [Credentials](credentials.md) for setup instructions. + +## Agent capabilities inside the sandbox + +The agent runs with full control inside the sandbox VM: + +- `sudo` access (the agent runs as a non-root user with sudo privileges) +- A private Docker Engine for building images and running containers +- Package installation through `apt`, `pip`, `npm`, and other package managers +- Full read and write access to the VM filesystem + +Everything the agent installs or creates inside the VM, including packages, +Docker images, and configuration changes, persists across stop and restart +cycles. When you remove the sandbox with `sbx rm`, the VM and its contents +are deleted. Only workspace files remain on the host. + +## What is blocked by default + +The following are blocked for all sandboxes and cannot be changed through +policy configuration: + +- Host filesystem access outside the workspace directory +- Host Docker daemon +- Host network and localhost +- Communication between sandboxes +- Raw TCP, UDP, and ICMP connections +- Traffic to private IP ranges and link-local addresses + +Outbound HTTP/HTTPS to domains not in the allow list is also blocked by +default, but you can add allow rules with `sbx policy allow`. diff --git a/content/manuals/ai/sandboxes/security/governance.md b/content/manuals/ai/sandboxes/security/governance.md new file mode 100644 index 00000000000..db758e9d1ad --- /dev/null +++ b/content/manuals/ai/sandboxes/security/governance.md @@ -0,0 +1,154 @@ +--- +title: Organization governance +linkTitle: Org governance +weight: 35 +description: Centrally manage sandbox network and filesystem policies for your organization. +keywords: docker sandboxes, governance, organization policy, AI governance, admin console, network access, filesystem access +--- + +> [!NOTE] +> Sandbox organization governance is available on a separate paid +> subscription. +> [Contact Docker Sales](https://www.docker.com/products/ai-governance/#contact-sales) +> to request access. + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +This page covers how to configure organization policies in the Docker Admin +Console under AI governance settings. For local sandbox policies that +individual users configure on their own machine, see [Policies](policy.md). 
+ +Sandbox network and filesystem policies defined in the +[Docker Admin Console](https://app.docker.com/admin) apply uniformly to every +sandbox in the organization. Rules are enforced across all developers' +machines, take precedence over local `sbx policy` rules, and can't be +overridden by individual users. Admins can optionally +[delegate](#delegate-rules-to-local-policy) specific rule types back to local +control so developers can add additional allow rules. + +## Network policies + +### Configuring org-level network rules + +Define network allow and deny rules in the Admin Console under +**AI governance > Network access**. Each rule takes a network target (domain, +wildcard, or CIDR range) and an action (allow or deny). You can add multiple +entries at once, one per line. + +Rules support exact domains (`example.com`), wildcard subdomains +(`*.example.com`), and optional port suffixes (`example.com:443`). + +`example.com` doesn't match subdomains, and `*.example.com` doesn't match +the root domain. Specify both to cover both. + +### Delegate rules to local policy + +When organization governance is active, local rules are ignored by default — +only the organization policy is in effect. Admins can delegate a rule type +back to local policy by turning on the **User defined** setting for that +rule type in AI governance settings. Turning the setting on delegates the +rule type: local `sbx policy` rules of that type are evaluated alongside +organization rules, letting users add hosts to the allowlist from their own +machine. + +If a rule type isn't delegated, local rules of that type still appear in +`sbx policy ls` but with an `inactive` status and a note that the +organization hasn't delegated the rule type to local policy: + +```console +$ sbx policy ls +NAME TYPE ORIGIN DECISION STATUS RESOURCES +balanced-dev network local allow inactive — corporate policy takes precedence and does api.anthropic.com + not delegate this rule type to local policy. +allow AI services network remote allow active api.anthropic.com + api.openai.com +allow Docker services network remote allow active *.docker.com + *.docker.io +``` + +Organization rules show up with `remote` in the `ORIGIN` column. + +Delegated local rules can expand access for domains the organization hasn't +explicitly denied, but can't override organization-level deny rules. This +applies to exact matches and wildcard matches alike; if the organization denies +`*.example.com`, a local allow for `api.example.com` has no effect because the +org-level wildcard deny covers it. + +For example, given an organization policy that allows `api.anthropic.com` +and denies `*.corp.internal`: + +- `sbx policy allow network api.example.com` — works, because the + organization hasn't denied `api.example.com` +- `sbx policy allow network build.corp.internal` — no effect, because the + organization denies `*.corp.internal` + +#### Blocked values in delegated rules + +To prevent overly broad rules from undermining the organization's policy, +certain catch-all values are blocked in delegated local rules: + +- Domain patterns: `*`, `**`, `*.com`, `**.com`, `*.*`, `**.**` +- CIDR ranges: `0.0.0.0/0`, `::/0` + +Scoped wildcards like `*.example.com` are still allowed. If a user attempts +to use a blocked value, `sbx policy` returns an error immediately. + +## Filesystem policies + +Filesystem policies control which host paths a sandbox can mount as +workspaces. By default, sandboxes can mount any directory the user has +access to. 
+ +Admins can restrict which paths are mountable by defining filesystem allow +and deny rules in the Admin Console under **AI governance > Filesystem +access**. Each rule takes a path pattern and an action (allow or deny). + +> [!CAUTION] +> Use `**` (double wildcard) rather than `*` (single wildcard) when writing +> path patterns to match path segments recursively. A single `*` only matches +> within a single path segment. For example, `~/**` matches all paths under +> the user's home directory, whereas `~/*` matches only paths directly +> under `~`. + +## Precedence + +Within any layer, deny rules beat allow rules. If a domain matches both, it's +blocked regardless of specificity. Outbound traffic is blocked unless a rule +allows it. + +When organization governance is active, local rules are not evaluated. Only +organization rules set in the Admin Console determine what is allowed or +denied. Organization-level denials can't be overridden locally. + +If the admin [delegates](#delegate-rules-to-local-policy) a rule type to +local policy by turning on the **User defined** setting, local rules of +that type are also evaluated alongside organization rules. Delegated local +rules can expand access for domains the organization hasn't explicitly +denied, but can't override organization-level denials. + +The same model applies to filesystem policies: organization-level rules take +precedence over local behavior. + +To unblock a domain, identify where the deny rule comes from. For local +rules, remove it with `sbx policy rm`. For organization-level rules, update +the rule in the Admin Console. + +## Troubleshooting + +### Policy changes not taking effect + +After updating organization policies in the Admin Console, changes take up +to 5 minutes to propagate to developer machines. To apply changes +immediately, users can run `sbx policy reset`, which stops the daemon and +forces it to pull the latest organization policies on the next `sbx` +command. + +> [!WARNING] +> `sbx policy reset` deletes all locally configured policy rules. The command +> prompts for confirmation before proceeding. + +### Sandbox cannot mount workspace + +If a sandbox fails to mount with a `mount policy denied` error, verify that +the filesystem allow rule in the Admin Console uses `**` rather than `*`. A +single `*` doesn't match across directory separators. diff --git a/content/manuals/ai/sandboxes/security/isolation.md b/content/manuals/ai/sandboxes/security/isolation.md new file mode 100644 index 00000000000..b533c638813 --- /dev/null +++ b/content/manuals/ai/sandboxes/security/isolation.md @@ -0,0 +1,87 @@ +--- +title: Isolation layers +weight: 10 +description: How Docker Sandboxes isolate AI agents using hypervisor, network, Docker Engine, and credential boundaries. +keywords: docker sandboxes, isolation, hypervisor, network, credentials +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +AI coding agents need to execute code, install packages, and run tools on +your behalf. Docker Sandboxes run each agent in its own microVM with four +isolation layers: hypervisor, network, Docker Engine, and credential proxy. + +## Hypervisor isolation + +Every sandbox runs inside a lightweight microVM with its own Linux kernel. +Unlike containers, which share the host kernel, a sandbox VM cannot access host +processes, files, or resources outside its defined boundaries. 
+ +- **Process isolation:** separate kernel per sandbox; processes inside the VM + are invisible to your host and to other sandboxes +- **Filesystem isolation:** only your workspace directory is shared with the + host. The rest of the VM filesystem persists across restarts but is removed + when you delete the sandbox. Symlinks pointing outside the workspace scope + are not followed. +- **Full cleanup:** when you remove a sandbox with `sbx rm`, the VM and + everything inside it is deleted + +The agent runs as a non-root user with sudo privileges inside the VM. The +hypervisor boundary is the isolation control, not in-VM privilege separation. + +## Network isolation + +Each sandbox has its own isolated network. Sandboxes cannot communicate with +each other and cannot reach your host's localhost. There is no shared network +between sandboxes or between a sandbox and your host. + +All HTTP and HTTPS traffic leaving a sandbox passes through a proxy on your +host that enforces the [network policy](policy.md). The sandbox routes +traffic through either a forward proxy or a transparent proxy depending on the +client's configuration. Both enforce the network policy; only the forward proxy +[injects credentials](credentials.md) for AI services. + +Raw TCP connections, UDP, and ICMP are blocked at the network layer. DNS +resolution is handled by the proxy; the sandbox cannot make raw DNS queries. +Traffic to private IP ranges, loopback, and link-local addresses is also +blocked. Only domains explicitly listed in the policy are reachable. + +For the default set of allowed domains, see +[Default security posture](defaults.md). + +## Docker Engine isolation + +Agents often need to build images, run containers, and use Docker Compose. +Mounting your host Docker socket into a container would give the agent full +access to your environment. + +Docker Sandboxes avoid this by running a separate [Docker +Engine](https://docs.docker.com/engine/) inside the sandbox environment, isolated from +your host. When the agent runs `docker build` or `docker compose up`, those +commands execute against that engine. The agent has no path to your host Docker +daemon. + +```plaintext +Host system + ├── Host Docker daemon + │ └── Your containers and images + │ + └── Sandbox Docker engine (isolated from host) + ├── [VM] Agent container — sandbox 1 + │ └── [VM] Containers created by agent + └── [VM] Agent container — sandbox 2 + └── [VM] Containers created by agent +``` + +## Credential isolation + +Most agents need API keys for their model provider. Rather than passing keys +into the sandbox, the host-side proxy intercepts outbound API requests and +injects authentication headers before forwarding each request. + +Credential values are never stored inside the VM. They are not available as +environment variables or files inside the sandbox unless you explicitly set +them. This means a compromised sandbox cannot read API keys from the local +environment. + +For how to store and manage credentials, see [Credentials](credentials.md). diff --git a/content/manuals/ai/sandboxes/security/policy.md b/content/manuals/ai/sandboxes/security/policy.md new file mode 100644 index 00000000000..29c33cbb5dc --- /dev/null +++ b/content/manuals/ai/sandboxes/security/policy.md @@ -0,0 +1,229 @@ +--- +title: Policies +weight: 30 +description: Configure network access rules for sandboxes. 
+keywords: docker sandboxes, policies, network access, allow rules, deny rules +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +Sandboxes are [network-isolated](isolation.md) from your host and from each +other. A policy system controls what a sandbox can access over the network. + +Use the `sbx policy` command to configure network access rules. Rules apply +to all sandboxes on the machine. + +If your organization manages sandbox policies centrally, organization rules +take precedence over the local rules described on this page. See +[Organization governance](governance.md). + +## Network policies + +The only way traffic can leave a sandbox is through an HTTP/HTTPS proxy on +your host, which enforces access rules on every outbound request. + +Non-HTTP TCP traffic, including SSH, can be allowed by adding a policy rule +for the destination IP address and port (for example, +`sbx policy allow network "10.1.2.3:22"`). UDP and ICMP traffic is blocked +at the network layer and can't be unblocked with policy rules. + +### Initial policy selection + +On first start, and after running `sbx policy reset`, the daemon prompts you to +choose a network policy: + +```plaintext +Choose a default network policy: + + 1. Open — All network traffic allowed, no restrictions. + 2. Balanced — Default deny, with common dev sites allowed. + 3. Locked Down — All network traffic blocked unless you allow it. + + Use ↑/↓ to navigate, Enter to select, or press 1–3. +``` + +| Policy | Description | +| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Open | All outbound traffic is allowed. No restrictions. Equivalent to adding a wildcard allow rule with `sbx policy allow network "**"`. | +| Balanced | Default deny, with a baseline allowlist covering AI provider APIs, package managers, code hosts, container registries, and common cloud services. You can extend this with `sbx policy allow`. | +| Locked Down | All outbound traffic is blocked, including model provider APIs (for example, `api.anthropic.com`). You must explicitly allow everything you need. | + +You can change your effective policy at any time using `sbx policy allow` and +`sbx policy deny`, or start over by running `sbx policy reset`. + +> [!NOTE] +> If your organization manages sandbox policies centrally, organization rules +> take precedence over the policy you select here. See +> [Organization governance](governance.md). + +### Non-interactive environments + +In non-interactive environments such as CI pipelines or headless servers, the +interactive prompt can't be displayed. Use `sbx policy set-default` to set the +default network policy before running any other `sbx` commands: + +```console +$ sbx policy set-default balanced +``` + +Available values are `allow-all`, `balanced`, and `deny-all`. After setting the +default, you can customize further with `sbx policy allow` and +`sbx policy deny` as usual. + +### Default policy + +All outbound HTTP/HTTPS traffic is blocked by default unless an explicit rule +allows it. The **Balanced** policy ships with a baseline allowlist covering AI provider +APIs, package managers, code hosts, container registries, and common cloud +services. Run `sbx policy ls` to see the active rules for your installation. 
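Putting these pieces together, a minimal headless setup might select the Balanced baseline and then add the extra hosts a workflow needs — the domains below are illustrative:

```console
$ sbx policy set-default balanced
$ sbx policy allow network "files.pythonhosted.org,*.pypi.org"
$ sbx policy ls --type network
```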
+ +### Managing rules + +Use [`sbx policy allow`](/reference/cli/sbx/policy/allow/) and +[`sbx policy deny`](/reference/cli/sbx/policy/deny/) to add network access +rules. Changes take effect immediately and apply to all sandboxes: + +```console +$ sbx policy allow network api.anthropic.com +$ sbx policy deny network ads.example.com +``` + +Specify multiple hosts in one command with a comma-separated list: + +```console +$ sbx policy allow network "api.anthropic.com,*.npmjs.org,*.pypi.org" +``` + +List all active policy rules with `sbx policy ls`: + +```console +$ sbx policy ls +ID TYPE DECISION RESOURCES +a1b2c3d4-e5f6-7890-abcd-ef1234567890 network allow api.anthropic.com, *.npmjs.org +f9e8d7c6-b5a4-3210-fedc-ba0987654321 network deny ads.example.com +``` + +Use `--type network` to show only network policies. + +Remove a policy by resource or by rule ID: + +```console +$ sbx policy rm network --resource ads.example.com +$ sbx policy rm network --id 2d3c1f0e-4a73-4e05-bc9d-f2f9a4b50d67 +``` + +### Resetting to defaults + +To remove all custom policies and restore the default policy, use +`sbx policy reset`: + +```console +$ sbx policy reset +``` + +This deletes the local policy store and stops the daemon. When the daemon +restarts on the next command, you are prompted to choose a new network policy. +If sandboxes are running, they stop when the daemon shuts down. You are prompted for +confirmation unless you pass `--force`: + +```console +$ sbx policy reset --force +``` + +### Switching to allow-by-default + +If you prefer a permissive policy where all outbound traffic is allowed, add +a wildcard allow rule: + +```console +$ sbx policy allow network "**" +``` + +This lets agents install packages and call any external API without additional +configuration. You can still deny specific hosts with `sbx policy deny`. + +### Wildcard syntax + +Rules support exact domains (`example.com`), wildcard subdomains +(`*.example.com`), and optional port suffixes (`example.com:443`). + +Note that `example.com` doesn't match subdomains, and `*.example.com` doesn't +match the root domain. Specify both to cover both. + +### Common patterns + +Allow access to package managers so agents can install dependencies: + +```console +$ sbx policy allow network "*.npmjs.org,*.pypi.org,files.pythonhosted.org,github.com" +``` + +The **Balanced** policy already includes AI provider APIs, package managers, +code hosts, and container registries. You only need to add allow rules for +additional domains your workflow requires. If you chose **Locked Down**, you +must explicitly allow everything. + +> [!WARNING] +> Allowing broad domains like `github.com` permits access to any content on +> that domain, including user-generated content. Only allow domains you trust +> with your data. 
+ +### Monitoring + +Use `sbx policy log` to see which hosts your sandboxes have contacted: + +```console +$ sbx policy log +Blocked requests: +SANDBOX TYPE HOST PROXY RULE REASON LAST SEEN COUNT +my-sandbox network blocked.example.com transparent domain-blocked default-deny 10:15:25 29-Jan 1 + +Allowed requests: +SANDBOX TYPE HOST PROXY RULE REASON LAST SEEN COUNT +my-sandbox network api.anthropic.com forward domain-allowed 10:15:23 29-Jan 42 +my-sandbox network registry.npmjs.org forward-bypass domain-allowed 10:15:20 29-Jan 18 +my-sandbox network app.example.com browser-open 10:15:10 29-Jan 1 +``` + +The **PROXY** column shows how the request left the sandbox: + +| Value | Description | +| ---------------- | -------------------------------------------------------------------------------------------------------------- | +| `forward` | Routed through the forward proxy. Supports [credential injection](credentials.md). | +| `forward-bypass` | Routed through the forward proxy without credential injection. | +| `transparent` | Intercepted by the transparent proxy. Policy is enforced but credential injection is not available. | +| `network` | Non-HTTP traffic (raw TCP, UDP, ICMP). TCP can be allowed with a policy rule; UDP and ICMP are always blocked. | +| `browser-open` | A sandbox process requested opening a URL in the host browser. Policy is enforced before opening the URL. | + +The **RULE** column identifies the policy rule that matched the request. The +**REASON** column includes extra context when the daemon records one. + +Filter by sandbox name by passing it as an argument: + +```console +$ sbx policy log my-sandbox +``` + +Use `--limit N` to show only the last `N` entries, `--json` for +machine-readable output, or `--type network` to filter by policy type. + +## Precedence + +All outbound traffic is blocked by default unless an explicit rule allows it. +If a domain matches both an allow and a deny rule, the deny rule wins +regardless of specificity. + +To unblock a domain, find the deny rule with `sbx policy ls` and remove it +with `sbx policy rm`. + +If your organization manages sandbox policies centrally, organization rules +take precedence and local rules are not evaluated unless the admin delegates +that rule type. See [Organization governance](governance.md). + +## Troubleshooting + +### Policy changes not taking effect + +If policy changes aren't taking effect, run `sbx policy reset` to wipe the +local policy store and restart the daemon. On next start, you are prompted to +choose a new network policy. diff --git a/content/manuals/ai/sandboxes/security/workspace.md b/content/manuals/ai/sandboxes/security/workspace.md new file mode 100644 index 00000000000..900677b50a9 --- /dev/null +++ b/content/manuals/ai/sandboxes/security/workspace.md @@ -0,0 +1,74 @@ +--- +title: Workspace trust +weight: 40 +description: | + How sandboxed agents interact with your workspace files and what to review + after an agent session. +keywords: docker sandboxes, workspace trust, file access, review, sbx +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +Agents running in sandboxes have full access to the workspace directory without +prompting. With the default direct mount, changes the agent makes appear on +your host immediately. Treat sandbox-modified workspace files the same way +you would treat a pull request from an untrusted contributor: review before +you trust them on your host. 
+ +## What the agent can modify + +The agent can create, modify, and delete any file in the workspace. This +includes: + +- Source code files +- Configuration files (`.eslintrc`, `pyproject.toml`, `.env`, etc.) +- Build files (`Makefile`, `package.json`, `Cargo.toml`) +- Git hooks (`.git/hooks/`) +- CI configuration (`.github/workflows/`, `.gitlab-ci.yml`) +- IDE configuration (`.vscode/tasks.json`, `.idea/` run configurations) +- Hidden files and directories +- Shell scripts and executables + +> [!CAUTION] +> Files like Git hooks, CI configuration, IDE task configs, and build scripts +> execute code when triggered by normal development actions such as committing, +> building, or opening the project in an IDE. Review these files after any agent +> session before performing those actions. + +## Branch mode + +The `--branch` flag lets the agent work on a separate branch. This is a +workflow convenience, not a security boundary: the agent still mounts the full +repository. See the [usage guide](../usage.md) for details. + +## Reviewing changes + +After an agent session, review changes before executing any code the agent +touched. + +With the default direct mount, changes are in your working tree: + +```console +$ git diff +``` + +If you used `--branch`, the agent's changes are on a separate branch: + +```console +$ git diff main..my-feature +``` + +Pay particular attention to: + +- **Git hooks** (`.git/hooks/`): run on commit, push, and other Git actions. + These are inside `.git/` and **do not appear in `git diff` output**. Check + them separately with `ls -la .git/hooks/`. +- **CI configuration** (`.github/workflows/`, `.gitlab-ci.yml`): runs on push +- **Build files** (`Makefile`, `package.json` scripts, `Cargo.toml`): run + during build or install steps +- **IDE configuration** (`.vscode/tasks.json`, `.idea/`): can run tasks when + you open the project +- **Executable files and shell scripts**: can run directly + +These files execute code without you explicitly running them. Review them before +committing, building, or opening the project in an IDE. diff --git a/content/manuals/ai/sandboxes/troubleshooting.md b/content/manuals/ai/sandboxes/troubleshooting.md new file mode 100644 index 00000000000..8e4c1b3006b --- /dev/null +++ b/content/manuals/ai/sandboxes/troubleshooting.md @@ -0,0 +1,262 @@ +--- +title: Troubleshooting +weight: 60 +description: Resolve common issues when using Docker Sandboxes. +keywords: docker sandboxes, sbx, troubleshooting, diagnostics, reset, network policy, git, ssh +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +## Run diagnostics + +Before digging into a specific issue, run +[`sbx diagnose`](/reference/cli/sbx/diagnose/) to check for common problems +with your installation, such as a missing CLI binary, an unresponsive daemon, +a CLI/daemon version mismatch, missing storage directories, or broken +authentication. + +```console +$ sbx diagnose +``` + +The command prints a summary of checks that passed, warned, or failed, along +with suggested fixes. Use `--output json` to get machine-readable output, or +`--output github-issue` to generate a Markdown snippet suitable for pasting +into a GitHub issue. + +## Resetting sandboxes + +If you hit persistent issues or corrupted state, run +[`sbx reset`](/reference/cli/sbx/reset/) to stop all VMs and delete all sandbox +data. Create fresh sandboxes afterwards. + +## Agent can't install packages or reach an API + +Sandboxes use a [deny-by-default network policy](security/policy.md). 
+If the agent fails to install packages or call an external API, the target +domain is likely not in the allow list. Check which requests are being blocked: + +```console +$ sbx policy log +``` + +Then allow the domains your workflow needs: + +```console +$ sbx policy allow network "*.npmjs.org,*.pypi.org,files.pythonhosted.org" +``` + +To allow all outbound traffic instead: + +```console +$ sbx policy allow network "**" +``` + +If `sbx policy allow` doesn't unblock the request, your organization may +manage sandbox policies centrally and take precedence over local rules. See +[Organization governance](security/governance.md). + +## SSH and other non-HTTP connections fail + +Non-HTTP TCP connections like SSH can be allowed by adding a policy rule for +the destination IP address and port. For example, to allow SSH to a specific +host: + +```console +$ sbx policy allow network "10.1.2.3:22" +``` + +Hostname-based rules (for example, `myhost:22`) don't work for non-HTTP +connections because the proxy can't resolve the hostname to an IP address in +this context. Use the IP address directly. + +UDP and ICMP traffic is blocked at the network layer and can't be unblocked +with policy rules. + +For Git operations over SSH, you can either add an allow rule for the Git +server's IP address or use HTTPS URLs instead: + +```console +$ git clone https://github.com/owner/repo.git +``` + +## Can't reach a service running on the host + +If a request to `127.0.0.1` or a local network IP returns "connection refused" +from inside a sandbox, the address is not reachable from within the sandbox VM. +See [Accessing host services from a sandbox](usage.md#accessing-host-services-from-a-sandbox). + +## Docker authentication failure + +If you see a message like `You are not authenticated to Docker`, your login +session has expired. In an interactive terminal, the CLI prompts you to sign in +again. In non-interactive environments such as scripts or CI, run `sbx login` +to re-authenticate. + +## Agent authentication failure + +If the agent can't reach its model provider or you see API key errors, the key +is likely invalid, expired, or not configured. Verify it's set in your shell +configuration file and that you sourced it or opened a new terminal. + +For agents that use the [credential proxy](security/credentials.md), make sure +you haven't set the API key to an invalid value inside the sandbox — the proxy +injects credentials automatically on outbound requests. + +If credentials are configured correctly but API calls still fail, check +`sbx policy log` and look at the **PROXY** column. Requests routed through +the `transparent` proxy don't get credential injection. This can happen when a +client inside the sandbox (such as a process in a Docker container) isn't +configured to use the forward proxy. See +[Monitoring network activity](security/policy.md#monitoring) +for details. + +## Docker build export fails with an ownership error + +Running `docker build` with the local exporter (`--output=type=local` or `-o +`) inside a sandbox fails because the exporter tries to `lchown` output +files to preserve ownership from the build. Processes inside the sandbox run as +an unprivileged user without `CAP_CHOWN`, so the operation is denied. + +Use the tar exporter and extract the archive instead: + +```console +$ mkdir -p ./result +$ docker build --output type=tar,dest=- . | tar xf - -C ./result +``` + +Extracting the tar archive as the current user avoids the `chown` call. 
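If you want to keep the archive around — for example, to copy it out with `sbx cp` — write it to a file and extract it in a second step (the file and directory names here are illustrative):

```console
$ docker build --output type=tar,dest=build.tar .
$ mkdir -p ./result && tar xf build.tar -C ./result
```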
+ +## Stale Git worktree after removing a sandbox + +If you used `--branch`, worktree cleanup during `sbx rm` is best-effort. If +it fails, the sandbox is removed but the branch and worktree are left behind. +If `git worktree list` shows a stale worktree in `.sbx/` after removing a +sandbox, clean it up manually: + +```console +$ git worktree remove .sbx/-worktrees/ +$ git branch -D +``` + +## Sandbox commits aren't signed + +Docker Sandboxes can sign Git commits with SSH keys from your host agent. +For setup steps, see [Signed commits](usage.md#signed-commits). + +If `ssh-add -L` prints `The agent has no identities.`, the sandbox can reach +the forwarded agent, but the host agent doesn't have a loaded key. Load the +signing key into your host SSH agent: + +```console +$ ssh-add ~/.ssh/id_ed25519 +``` + +If commit signing works on the host but fails in a sandbox, check whether Git +is configured to sign with a host file path such as +`/Users/me/.ssh/id_ed25519.pub`. The sandbox uses the forwarded SSH agent, not +the host key file path. Use the inline public key form instead: + +```console +$ git config --global gpg.format ssh +$ git config --global user.signingkey "key::$(ssh-add -L | head -n 1)" +``` + +If Git reports that `ssh-keygen` is missing, use a sandbox template that +includes OpenSSH client tools. + +If `git log --show-signature` reports that `gpg.ssh.allowedSignersFile` needs +to be configured, Git can't verify the SSH signature locally. This verification +config isn't required to create signed commits. GitHub uses the SSH signing +keys configured in your GitHub account to verify commits. + +GPG and S/MIME signing keys aren't available inside the sandbox. If your +repository or organization requires GPG or S/MIME signatures, or if SSH signing +isn't configured, use one of these workarounds: + +- Commit outside the sandbox. Let the agent make changes without committing, + then commit and sign from your host terminal. + +- Sign after the fact. Let the agent commit inside the sandbox, then re-sign + the commits on your host: + + ```console + $ git rebase --exec 'git commit --amend --no-edit -S' origin/main + ``` + + This replays each commit on the branch and re-signs it with your local + signing key. + +## Clock drift after sleep/wake + +If your laptop sleeps and wakes while a sandbox is running, the VM clock can +fall behind the host clock. This causes problems such as: + +- External API calls failing because of timestamp validation. +- Git commits with incorrect timestamps. +- TLS certificate errors due to time mismatches. + +To fix the issue, stop and restart the sandbox: + +```console +$ sbx stop +$ sbx run +``` + +Restarting the sandbox re-syncs the VM clock with the host. + +## Removing all state + +As a last resort, if `sbx reset` doesn't resolve your issue, you can remove the +`sbx` state directory entirely. This deletes all sandbox data, configuration, and +cached images. Stop all running sandboxes first with `sbx reset`. 
+ +{{< tabs >}} +{{< tab name="macOS" >}} + +```console +$ rm -rf ~/Library/Application\ Support/com.docker.sandboxes/ +``` + +{{< /tab >}} +{{< tab name="Windows" >}} + +```powershell +> Remove-Item -Recurse -Force "$env:LOCALAPPDATA\DockerSandboxes" +``` + +{{< /tab >}} +{{< tab name="Linux" >}} + +Sandbox state on Linux follows the XDG Base Directory specification and is +spread across three directories: + +```console +$ rm -rf ~/.local/state/sandboxes/ +$ rm -rf ~/.cache/sandboxes/ +$ rm -rf ~/.config/sandboxes/ +``` + +If you have set custom `XDG_STATE_HOME`, `XDG_CACHE_HOME`, or +`XDG_CONFIG_HOME` environment variables, replace `~/.local/state`, +`~/.cache`, and `~/.config` with the corresponding values. + +{{< /tab >}} +{{< /tabs >}} + +## Report an issue + +If you've exhausted the steps above and the problem persists, file a GitHub +issue at [github.com/docker/sbx-releases/issues](https://github.com/docker/sbx-releases/issues). + +To help Docker investigate, generate a diagnostics bundle and share it when +reporting the issue: + +```console +$ sbx diagnose --upload +``` + +The bundle contains daemon logs, diagnostic check results, and basic system +information. When `--upload` is confirmed, the bundle is uploaded to Docker +support and the command prints a diagnostics ID. Include this ID in your +issue so the team can correlate it with the uploaded bundle. diff --git a/content/manuals/ai/sandboxes/usage.md b/content/manuals/ai/sandboxes/usage.md new file mode 100644 index 00000000000..50783c6a3a4 --- /dev/null +++ b/content/manuals/ai/sandboxes/usage.md @@ -0,0 +1,396 @@ +--- +title: Usage +weight: 20 +description: Common patterns for working with sandboxes. +keywords: docker sandboxes, sbx, usage, run, policy, secrets, branches, git, workspaces, ssh +--- + +{{< summary-bar feature_name="Docker Sandboxes sbx" >}} + +## Working with sandboxes + +The basic workflow is [`run`](/reference/cli/sbx/run/) to start, +[`ls`](/reference/cli/sbx/ls/) to check status, +[`stop`](/reference/cli/sbx/stop/) to pause, and +[`rm`](/reference/cli/sbx/rm/) to clean up: + +```console +$ sbx run claude # start an agent +$ sbx ls # see what's running +$ sbx stop my-sandbox # pause it +$ sbx rm my-sandbox # delete it entirely +``` + +To get a shell inside a running sandbox — useful for inspecting the environment, +checking Docker containers, or manually installing something: + +```console +$ sbx exec -it bash +``` + +If you need a clean slate, remove the sandbox and re-run: + +```console +$ sbx rm my-sandbox +$ sbx run claude +``` + +## Interactive mode + +Running `sbx` with no subcommands opens an interactive terminal dashboard: + +```console +$ sbx +``` + +The dashboard shows all your sandboxes as cards with live status, CPU, and +memory usage. From here you can: + +- **Create** a sandbox (`c`). +- **Start or stop** a sandbox (`s`). +- **Attach** to an agent session (`Enter`), same as `sbx run`. +- **Open a shell** inside the sandbox (`x`), same as `sbx exec`. +- **Remove** a sandbox (`r`). + +The dashboard also includes a network governance panel where you can monitor +outbound connections made by your sandboxes and manage network rules. Use `tab` +to switch between the sandboxes panel and the network panel. + +From the network panel you can browse connection logs, allow or block specific +hosts, and add custom network rules. Press `?` to see all keyboard shortcuts. + +## Git workflow + +When your workspace is a Git repository, the agent edits your working tree +directly by default. 
Changes appear in your working tree immediately, the same +as working in a normal terminal. + +If you run multiple agents on the same repository at once, use [branch +mode](#branch-mode) to give each agent its own branch and working directory. + +### Direct mode (default) + +The agent edits your working tree directly. Stage, commit, and push as you +normally would. If you run multiple agents on the same repository at the same +time, they may step on each other's changes. See +[branch mode](#branch-mode) for an alternative. + +### Branch mode + +Pass `--branch ` to give the agent its own +[Git worktree](https://git-scm.com/docs/git-worktree) and branch. This +prevents conflicts when multiple agents, or you and an agent, write to the +same files at the same time. You can set `--branch` on `create`, `run`, or +both. + +The CLI creates worktrees under `.sbx/` in your repository root. The +worktree is a separate working directory, so the agent doesn't touch your main +working tree. This means: + +- The worktree branches off your latest commit when you create it. + Uncommitted changes in your working tree are not included (`sbx` warns you + if it detects any). +- Files you add or change in your main working tree won't be visible to the + agent, and vice versa. The two directories are independent. + +#### Starting a branch + +```console +$ sbx run claude --branch my-feature # agent works on the my-feature branch +``` + +Use `--branch auto` to let the CLI generate a branch name for you: + +```console +$ sbx run claude --branch auto +``` + +You can also create the sandbox first and add a branch at run time: + +```console +$ sbx create --name my-sandbox claude . +$ sbx run --branch my-feature my-sandbox +``` + +Or set the branch at create time and reuse it on subsequent runs: + +```console +$ sbx create --name my-sandbox --branch my-feature claude . +$ sbx run my-sandbox # resumes in the my-feature worktree +$ sbx run --branch my-feature my-sandbox # same — reuses the existing worktree +``` + +#### Multiple branches per sandbox + +You can run multiple worktrees in the same sandbox by passing different branch +names: + +```console +$ sbx run --branch feature-a my-sandbox +$ sbx run --branch feature-b my-sandbox +``` + +#### Reviewing and pushing changes + +To review the agent's work, find the worktree with `git worktree list`, then +push or open a PR from there: + +```console +$ git worktree list # find the worktree path +$ cd .sbx/-worktrees/my-feature +$ git log # see what the agent did +$ git push -u origin my-feature +$ gh pr create +``` + +Some agents don't commit automatically and leave changes uncommitted in the +worktree. If that happens, commit from the worktree directory before pushing. + +See [Workspace trust](security/workspace.md) for security considerations when +reviewing agent changes. + +### Signed commits + +Sandboxes can sign Git commits with SSH keys from your host agent. The private +key stays on your host. + +On the host, load the key into your SSH agent: + +```console +$ ssh-add ~/.ssh/id_ed25519 +``` + +Inside the sandbox, check that the forwarded agent exposes the key: + +```console +$ ssh-add -L +``` + +Configure Git globally inside the sandbox to use SSH commit signing. This +writes to the sandbox user's Git config, not your repository's `.git/config`. 
+Use an inline public key instead of a key file path, because host paths such as +`~/.ssh/id_ed25519.pub` might not exist in the sandbox: + +```console +$ git config --global gpg.format ssh +$ git config --global user.signingkey "key::$(ssh-add -L | head -n 1)" +``` + +Then commit as usual: + +```console +$ git commit -S +``` + +For common signing failures, see +[Sandbox commits aren't signed](troubleshooting.md#sandbox-commits-arent-signed). + +#### Cleanup + +`sbx rm` removes the sandbox and all of its worktrees and branches. + +#### Ignoring the `.sbx/` directory + +Branch mode stores worktrees under `.sbx/` in your repository root. To keep +this directory out of `git status`, add it to your project's `.gitignore`: + +```console +$ echo '.sbx/' >> .gitignore +``` + +Or, to ignore it across all repositories, add `.sbx/` to your global gitignore: + +```console +$ echo '.sbx/' >> "$(git config --global core.excludesFile)" +``` + +> [!TIP] +> If `git config --global core.excludesFile` is empty, set one first: +> `git config --global core.excludesFile ~/.gitignore`. + +You can also create Git worktrees yourself and run an agent directly in one, +but the sandbox won't have access to the `.git` directory in the parent +repository. This means the agent can't commit, push, or use Git. `--branch` +solves this by setting up the worktree so that Git works inside the sandbox. + +## Reconnecting and naming + +Sandboxes persist after the agent exits. Running the same workspace path again +reconnects to the existing sandbox rather than creating a new one: + +```console +$ sbx run claude ~/my-project # creates sandbox +$ sbx run claude ~/my-project # reconnects to same sandbox +``` + +Use `--name` to make this explicit and avoid ambiguity: + +```console +$ sbx run claude --name my-project +``` + +## Creating without attaching + +[`sbx run`](/reference/cli/sbx/run/) creates the sandbox and attaches you to +the agent. To create a sandbox in the background without attaching: + +```console +$ sbx create claude . +``` + +Unlike `run`, `create` requires an explicit workspace path. It uses direct +mode by default, or pass `--branch` for [branch mode](#branch-mode). Attach +later with `sbx run`: + +```console +$ sbx run claude-my-project +``` + +## Multiple workspaces + +You can mount extra directories into a sandbox alongside the main workspace. +The first path is the primary workspace — the agent starts here, and the +sandbox's Git worktree is created from this directory if you use `--branch`. +Extra workspaces are always mounted directly. + +All workspaces appear inside the sandbox at their absolute host paths. Append +`:ro` to mount an extra workspace read-only — useful for reference material or +shared libraries the agent shouldn't modify: + +```console +$ sbx run claude ~/project-a ~/shared-libs:ro ~/docs:ro +``` + +Each sandbox is completely isolated, so you can also run separate projects +side-by-side. Remove unused sandboxes when you're done to reclaim disk space: + +```console +$ sbx run claude ~/project-a +$ sbx run claude ~/project-b +$ sbx rm # when finished +``` + +## Copying files between host and sandbox + +Use [`sbx cp`](/reference/cli/sbx/cp/) to copy files or directories between +your host and a sandbox. This is useful for one-off files that aren't part of a +mounted workspace, such as generated output, logs, or setup files. 
+ +```console +$ sbx cp ./config.json my-sandbox:/home/user/ +$ sbx cp my-sandbox:/home/user/output.log ./ +$ sbx cp ./src/ my-sandbox:/home/user/src +``` + +One side of the copy must use `SANDBOX:PATH`. Copying directly between two +sandboxes isn't supported. + +## Installing dependencies and using Docker + +Ask the agent to install what's needed — it has sudo access, and installed +packages persist for the sandbox's lifetime. For teams or repeated setups, +see [Customize](customize/) for reusable templates and declarative kits. + +Agents can also build Docker images, run containers, and use +[Compose](https://docs.docker.com/compose/). Everything runs inside the sandbox's private Docker +daemon, so containers started by the agent never appear in your host's +`docker ps`. When you remove the sandbox, all images, containers, and volumes +inside it are deleted with it. + +## Accessing services in the sandbox + +Sandboxes are [network-isolated](security/isolation.md) — your browser or local +tools can't reach a server running inside one by default. Use +[`sbx ports`](/reference/cli/sbx/ports/) to forward traffic from your host into +a running sandbox. + +The common case: an agent has started a dev server or API, and you want to open +it in your browser or run tests against it. + +```console +$ sbx ports my-sandbox --publish 8080:3000 # host 8080 → sandbox port 3000 +$ open http://localhost:8080 +``` + +To let the OS pick a free host port instead of choosing one yourself: + +```console +$ sbx ports my-sandbox --publish 3000 # ephemeral host port +$ sbx ports my-sandbox # check which port was assigned +``` + +`sbx ls` shows active port mappings alongside each sandbox, and `sbx ports` +lists them in detail: + +```console +$ sbx ls +SANDBOX AGENT STATUS PORTS WORKSPACE +my-sandbox claude running 127.0.0.1:8080->3000/tcp /home/user/proj +``` + +To stop forwarding a port: + +```console +$ sbx ports my-sandbox --unpublish 8080:3000 +``` + +A few things to keep in mind: + +- **Services must bind to `0.0.0.0`** — a service listening on `127.0.0.1` + inside the sandbox won't be reachable through a published port. Most dev + servers default to `127.0.0.1`, so you'll usually need to pass a flag like + `--host 0.0.0.0` when starting them. +- **Not persistent** — published ports are lost when the sandbox stops or the + daemon restarts. Re-publish after restarting. +- **No create-time flag** — unlike `docker run -p`, there's no `--publish` + option on `sbx run` or `sbx create`. Ports can only be published after the + sandbox is running. +- **Unpublish requires the host port** — `--unpublish 3000` is rejected; you + must use `--unpublish 8080:3000`. Run `sbx ports my-sandbox` first if you + used an ephemeral port and need to find the assigned host port. + +## Accessing host services from a sandbox + +Services running on your host are reachable from inside a sandbox using the +hostname `host.docker.internal`. +Use this instead of `127.0.0.1` or your machine's local network IP address, +which are not reachable from inside the sandbox. + +The sandbox proxy translates `host.docker.internal` to `localhost` before +forwarding the request, so you must add the `localhost` address with the +specific port to your network policy allowlist: + +```console +$ sbx policy allow network localhost:11434 +``` + +Then use `host.docker.internal` in any configuration or request that points at +the host service. 
For example, to verify connectivity from a sandbox shell: + +```console +$ curl http://host.docker.internal:11434 +``` + +## Rolling out to a team + +When rolling sandboxes out across a team, two features handle different +needs: + +- [Custom templates and kits](customize/) let you package reusable agent + configurations, MCP servers, base images, and per-project policies. Every + developer pulls them down with their workspace. +- [Organization governance](security/governance.md) lets admins define + network and filesystem rules in the Docker Admin Console. The rules apply + across every developer's sandboxes and take precedence over local policy. + Available on a separate paid subscription. + +Customization gives developers shared starting points. Governance gives +admins centralized enforcement. + +## What persists + +While a sandbox exists, installed packages, Docker images, configuration +changes, and command history all persist across stops and restarts. When you +remove a sandbox, everything inside is deleted — only your workspace files +remain on your host. To preserve a configured environment, create a +[custom template](customize/templates.md). diff --git a/content/manuals/billing/3d-secure.md b/content/manuals/billing/3d-secure.md index 32f815c8768..fc4ec800ff5 100644 --- a/content/manuals/billing/3d-secure.md +++ b/content/manuals/billing/3d-secure.md @@ -1,31 +1,52 @@ --- -title: 3D Secure authentication -description: Learn about 3D Secure support for Docker billing. -keywords: billing, renewal, payments, subscriptions +title: Use 3D Secure authentication for Docker billing +linkTitle: 3D Secure authentication +description: Docker billing supports 3D Secure (3DS) for secure payment authentication. Learn how 3DS works with Docker subscriptions. +keywords: billing, renewal, payments, subscriptions, 3DS, credit card verification, secure payments, Docker billing security weight: 40 --- -> [!NOTE] -> -> [Docker plan](../subscription/setup.md) payments support 3D secure authentication. +Docker supports 3D Secure (3DS), an extra layer of authentication required +for certain credit card payments. If your bank or card issuer requires 3DS, you +may need to verify your identity before your payment can be completed. + +## How it works + +When a 3DS check is triggered during checkout, your bank or card issuer +may ask you to verify your identity. This can include: -3D Secure (3DS) authentication incorporates an additional security layer for credit card transactions. If you’re making payments for your Docker billing in a region that requires 3DS, or using a payment method that requires 3DS, you’ll need to verify your identity to complete any transactions. The method used to verify your identity varies depending on your banking institution. +- Entering a one-time password sent to your phone +- Approving the charge through your mobile banking app +- Answering a security question or using biometrics -The following transactions will use 3DS authentication if your payment method requires it. +The exact verification steps depend on your financial institution's +requirements. 
-- Starting a [new paid subscription](../subscription/setup.md) -- Changing your [billing cycle](/billing/cycle/) from monthly to annual +## When you need to verify + +You may be asked to verify your identity when performing any of the following +actions: + +- Starting a [paid subscription](../subscription/setup.md) +- Changing your [billing cycle](/manuals/billing/cycle.md) from monthly to annual - [Upgrading your subscription](../subscription/change.md) - [Adding seats](../subscription/manage-seats.md) to an existing subscription -## Troubleshooting +If 3DS is required and your payment method supports it, the verification prompt +will appear during checkout. -If you encounter errors completing payments due to 3DS, you can troubleshoot in the following ways. +## Troubleshooting payment verification -1. Retry your transaction and verification of your identity. -2. Contact your bank to determine any errors on their end. -3. Try a different payment method that doesn’t require 3DS. +If you're unable to complete your payment due to 3DS: -> [!TIP] +1. Retry your transaction. Make sure you're completing the verification + prompt in the same browser tab. +1. Use a different payment method. Some cards may not support 3DS properly + or be blocked. +1. Contact your bank. Your bank may be blocking the payment or the 3DS + verification attempt. + +> [!NOTE] > -> Make sure you allow third-party scripts in your browser and that any ad blocker you may use is disabled when attempting to complete payments. +> Disabling ad blockers or browser extensions that block pop-ups can help +> the 3DS prompt display correctly. diff --git a/content/manuals/billing/_index.md b/content/manuals/billing/_index.md index fc869b4dcb2..00925934faa 100644 --- a/content/manuals/billing/_index.md +++ b/content/manuals/billing/_index.md @@ -1,9 +1,9 @@ --- -title: Billing and payments +title: Manage billing and payments linkTitle: Billing -description: Discover information on billing and payment processes for Docker subscriptions. -keywords: billing, invoice, payment, subscription -weight: 20 +description: Find information about managing billing and payments for Docker subscriptions. +keywords: billing, invoice, payment, subscription, Docker billing, update payment method, billing history, invoices, payment verification, tax exemption +weight: 10 params: sidebar: group: Platform @@ -36,6 +36,7 @@ aliases: - /billing/docker-hub-pricing/ --- -Use the resources in this section to manage your billing and payment settings for your Docker subscription plans. +Use the resources in this section to manage billing and payments for your Docker +subscriptions. {{< grid items="grid_core" >}} diff --git a/content/manuals/billing/cycle.md b/content/manuals/billing/cycle.md index e3e98563ff5..d8b6b3f492d 100644 --- a/content/manuals/billing/cycle.md +++ b/content/manuals/billing/cycle.md @@ -5,94 +5,55 @@ description: Learn to change your billing cycle for your Docker subscription keywords: billing, cycle, payments, subscription --- -You can pay for a subscription plan on a monthly or yearly billing cycle. You select your preferred billing cycle when you buy your subscription. +You can choose between a monthly or annual billing cycle when purchasing a +subscription. If you have a monthly billing cycle, you can choose to +switch to an annual billing cycle. -> [!NOTE] -> -> Business plan is available only on yearly billing cycle. +If you're on a monthly plan, you can switch to a yearly plan at any time. 
+However, switching from a yearly to a monthly cycle isn't supported.
 
-If you have a monthly billing cycle, you can choose to switch to an annual billing cycle.
+When you change your billing cycle:
 
-> [!NOTE]
->
-> You can't switch from an annual billing cycle to a monthly cycle.
+- Your next billing date reflects the new cycle. To find your next billing date,
+  see [View renewal date](history.md#view-renewal-date).
+- Your subscription's start date resets. For example, if the monthly
+  subscription started on March 1 and ended on April 1, switching the billing
+  duration on March 15, 2024, resets the start date to March 15, 2024, with
+  an end date of March 15, 2025.
+- Any unused portion of your monthly subscription is prorated and applied as
+  credit toward an annual subscription. For example, if your monthly cost is $10
+  and your used value is $5, when you switch to an annual cycle ($100), the
+  final charge is $95 ($100 - $5).
 
-When you change the billing cycle's duration:
+## Change personal account to an annual cycle
 
-- The next billing date reflects the new cycle. To find your next billing date, see [View renewal date](history.md#view-renewal-date).
-- The subscription's start date resets. For example, if the start date of the monthly subscription is March 1st and the end date is April 1st, then after switching the billing duration to March 15th, 2024 the new start date is March 15th, 2024, and the new end date is March 15th, 2025.
-- Any unused monthly subscription is prorated and applied as credit towards the new annual period. For example, if you switch from a $10 monthly subscription to a $100 annual plan, deducting the unused monthly value (in this case $5), the migration cost becomes $95 ($100 - $5). The renewal cost after March 15, 2025 is $100.
-
-{{% include "tax-compliance.md" %}}
-
-## Personal account
-
-{{< tabs >}}
-{{< tab name="Docker plan" >}}
+Pay by invoice is not available for subscription upgrades or changes.
 
 To change your billing cycle:
 
-1. Sign in to [Docker Home](https://app.docker.com/).
-2. Under Settings and administration, select **Billing**.
-3. On the plans and usage page, select **Switch to annual billing**.
-4. Verify your billing information.
-5. Select **Continue to payment**.
-6. Verify payment information and select **Upgrade subscription**.
-
-> [!NOTE]
->
-> If you choose to pay using a US bank account, you must verify the account. For
-> more information, see [Verify a bank account](manuals/billing/payment-method.md#verify-a-bank-account).
+1. Sign in to [Docker Home](https://app.docker.com/) and select
+   your organization.
+1. Select **Billing**.
+1. On the plans and usage page, select **Switch to annual billing**.
+1. Verify your billing information.
+1. Select **Continue to payment**.
+1. Verify payment information and select **Upgrade subscription**. If you choose to pay using a US bank account, you must verify the account. For more information, see [Verify a bank account](/manuals/billing/payment-method.md#verify-a-bank-account).
 
 The billing plans and usage page will now reflect your new annual plan details.
 
-{{< /tab >}}
-{{< tab name="Legacy Docker plan" >}}
-
-To change your billing cycle:
-
-1. Sign in to [Docker Hub](https://hub.docker.com).
-2. Select your avatar in the top-right corner.
-3. From the drop-down menu select **Billing**.
-4. In the bottom-right of the **Plan** tab, select **Switch to annual billing**.
-5. 
Review the information displayed on the **Change to an Annual subscription** page and select **Accept Terms and Purchase** to confirm. - -{{< /tab >}} -{{< /tabs >}} - -## Organization - -> [!NOTE] -> -> You must be an organization owner to make changes to the payment information. - -{{< tabs >}} -{{< tab name="Docker plan" >}} - -To change your organization's billing cycle: - -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. On the plans and usage page, select **Switch to annual billing**. -4. Verify your billing information. -5. Select **Continue to payment**. -6. Verify payment information and select **Upgrade subscription**. - -> [!NOTE] -> -> If you choose to pay using a US bank account, you must verify the account. For -> more information, see [Verify a bank account](manuals/billing/payment-method.md#verify-a-bank-account). +## Change organization to an annual cycle -{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} +You must be an organization owner to make changes to the payment information. -To change your organization's billing cycle: +Pay by invoice is not available for subscription upgrades or changes. -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **My Hub** from the top-level navigation. -3. Select the organization that you want to change the payment method for, and then select **Billing**. -4. Select **Switch to annual billing**. -5. Review the information displayed on the **Change to an Annual subscription** page and select **Accept Terms and Purchase** to confirm. +Follow these steps to switch from a monthly to annual billing cycle for your +organization's Docker subscription: -{{< /tab >}} -{{< /tabs >}} \ No newline at end of file +1. Sign in to [Docker Home](https://app.docker.com/) and select + your organization. +1. Select **Billing**. +1. On the plans and usage page, select **Switch to annual billing**. +1. Verify your billing information. +1. Select **Continue to payment**. +1. Verify payment information and select **Upgrade subscription**. If you choose to pay using a US bank account, you must verify the account. For more information, see [Verify a bank account](/manuals/billing/payment-method.md#verify-a-bank-account). diff --git a/content/manuals/billing/details.md b/content/manuals/billing/details.md index 76488844406..2341798ecbd 100644 --- a/content/manuals/billing/details.md +++ b/content/manuals/billing/details.md @@ -1,59 +1,40 @@ --- -title: Update billing information +title: Manage your billing information weight: 30 description: Learn how to update your billing information in Docker Hub -keywords: payments, billing, subscription, invoices +keywords: payments, billing, subscription, invoices, update billing email, change billing address, VAT ID, Docker billing account --- -You can update the billing information for your personal account or for an organization. When you update your billing information, these changes apply to future billing invoices. Note that you can't update an existing invoice, including paid and unpaid invoices. +You can update the billing information for your personal account or for an +organization. When you update your billing information, these changes apply to +future billing invoices. The email address you provide for a billing account is +where Docker sends all invoices and other billing related communications. -The billing information provided appears on all your billing invoices. 
The email address provided is where Docker sends all invoices and other [billing-related communication](#update-your-billing-invoice-email-address). - -{{% include "tax-compliance.md" %}} +> [!NOTE] +> +> Existing invoices, whether paid or unpaid, cannot be updated. +> Changes only apply to future invoices. ## Manage billing information ### Personal account -{{< tabs >}} -{{< tab name="Docker plan" >}} - -To update your billing information: - -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Billing information** from the left-hand navigation. -4. On your billing information card, select **Change**. -5. Update your billing contact and billing address information. -6. Optional. To add or update a VAT ID, select the **I'm purchasing as a business** checkbox and enter your Tax ID. - - > [!IMPORTANT] - > - > Your VAT number must include your country prefix. For example, if you are - entering a VAT number for Germany, you would enter `DE123456789`. - -7. Select **Update**. - -{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} - To update your billing information: -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu, select **Billing**. -4. Select **Billing Address** and enter your updated billing information. -5. Optional. To add or update a VAT ID, enter your **Tax ID/VAT**. +1. Sign in to [Docker Home](https://app.docker.com/) and select your + organization. +1. Select **Billing**. +1. Select **Billing information** from the left-hand navigation. +1. On your billing information card, select **Change**. +1. Update your billing contact and billing address information. +1. Optional. To add or update a VAT ID, select the **I'm purchasing as a business** checkbox and enter your Tax ID. - > [!IMPORTANT] - > - > Your VAT number must include your country prefix. For example, if you are - entering a VAT number for Germany, you would enter `DE123456789`. + > [!IMPORTANT] + > + > Your VAT number must include your country prefix. For example, if you are + > entering a VAT number for Germany, you would enter `DE123456789`. -6. Select **Submit**. - -{{< /tab >}} -{{< /tabs >}} +1. Select **Update**. ### Organization @@ -61,113 +42,51 @@ To update your billing information: > > You must be an organization owner to make changes to the billing information. -{{< tabs >}} -{{< tab name="Docker plan" >}} - To update your billing information: -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Billing information** from the left-hand navigation. -4. On your billing information card, select **Change**. -5. Update your billing contact and billing address information. -6. Optional. To add or update a VAT ID, select the **I'm purchasing as a business** checkbox and enter your Tax ID. +1. Sign in to [Docker Home](https://app.docker.com/) and select your + organization. +1. Select **Billing**. +1. Select **Billing information** from the left-hand navigation. +1. On your billing information card, select **Change**. +1. Update your billing contact and billing address information. +1. Optional. To add or update a VAT ID, select the **I'm purchasing as a business** checkbox and enter your Tax ID. - > [!IMPORTANT] - > - > Your VAT number must include your country prefix. For example, if you are - entering a VAT number for Germany, you would enter `DE123456789`. - -7. Select **Update**. 
- -{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} - -To update your billing information: + > [!IMPORTANT] + > + > Your VAT number must include your country prefix. For example, if you are + > entering a VAT number for Germany, you would enter `DE123456789`. -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select the organization that you want to change the payment method for. -5. Select **Billing Address**. -6. Optional. To add or update a VAT ID, enter your **Tax ID/VAT**. +1. Select **Update**. - > [!IMPORTANT] - > - > Your VAT number must include your country prefix. For example, if you are - entering a VAT number for Germany, you would enter `DE123456789`. - -7. Select **Submit**. - -{{< /tab >}} -{{< /tabs >}} - -## Update your billing invoice email address +## Update your billing email address Docker sends the following billing-related emails: -- Confirmation of a new subscription. -- Confirmation of paid invoices. -- Notifications of credit or debit card payment failures. -- Notifications of credit or debit card expiration. -- Confirmation of a cancelled subscription -- Reminders of subscription renewals for annual subscribers. This is sent 14 days before the renewal date. +- Confirmations (new subscriptions, paid invoices) +- Notifications (card failure, card expiration) +- Reminders (subscription renewal) You can update the email address that receives billing invoices at any time. ### Personal account -{{< tabs >}} -{{< tab name="Docker plan" >}} - -To update your billing email address: - -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Billing information** from the left-hand navigation. -4. On your billing information card, select **Change**. -5. Update your billing contact information and select **Update**. - -{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} - To update your billing email address: -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select **Billing Address**. -5. Update the email address in the **Billing contact** section. -6. Select **Submit**. - -{{< /tab >}} -{{< /tabs >}} +1. Sign in to [Docker Home](https://app.docker.com/) and select your + organization. +1. Select **Billing**. +1. Select **Billing information** from the left-hand navigation. +1. On your billing information card, select **Change**. +1. Update your billing contact information and select **Update**. ### Organizations -{{< tabs >}} -{{< tab name="Docker plan" >}} - To update your billing email address: -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Billing information** from the left-hand navigation. -4. On your billing information card, select **Change**. -5. Update your billing contact information and select **Update**. - -{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} - -To update your billing email address: - -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select the name of the organization. -5. Select **Billing Address**. -6. Update the email address in the **Billing contact** section. -7. Select **Submit**. - -{{< /tab >}} -{{< /tabs >}} +1. 
Sign in to [Docker Home](https://app.docker.com/) and select + your organization. +1. Select **Billing**. +1. Select **Billing information** from the left-hand navigation. +1. On your billing information card, select **Change**. +1. Update your billing contact information and select **Update**. diff --git a/content/manuals/billing/faqs.md b/content/manuals/billing/faqs.md index d9212c8b50f..3e9b3dd2fcd 100644 --- a/content/manuals/billing/faqs.md +++ b/content/manuals/billing/faqs.md @@ -7,33 +7,23 @@ tags: [FAQ] weight: 60 --- -### What credit and debit cards are supported? - -- Visa -- MasterCard -- American Express -- Discover -- JCB -- Diners -- UnionPay -- Link -- ACH transfer with a [verified](manuals/billing/payment-method.md#verify-a-bank-account) US bank account - -### What currency is supported? - -United States dollar (USD). - ### What happens if my subscription payment fails? -If your subscription payment fails, there is a grace period of 15 days, including the due date. Docker retries to collect the payment 3 times using the following schedule: +If your subscription payment fails, there is a grace period of 15 days, +including the due date. Docker retries to collect the payment 3 times using the +following schedule: - 3 days after the due date - 5 days after the previous attempt - 7 days after the previous attempt -Docker also sends an email notification `Action Required - Credit Card Payment Failed` with an attached unpaid invoice after each failed payment attempt. +Docker also sends an email notification +`Action Required - Credit Card Payment Failed` with an attached unpaid invoice +after each failed payment attempt. -Once the grace period is over and the invoice is still not paid, the subscription downgrades to a free plan and all paid features are disabled. +Once the grace period is over and the invoice is still not paid, the +subscription downgrades to a free subscription and all paid features are +disabled. ### Can I manually retry a failed payment? @@ -45,18 +35,29 @@ updated. If you need to update your default payment method, see ### Does Docker collect sales tax and/or VAT? -Docker began collecting sales tax on subscription fees for United States customers on July 1, 2024. For European customers, Docker will begin collecting VAT on March 1, 2025. +Docker collects sales tax and/or VAT from the following: -To ensure that tax assessments are correct, make sure that your billing information and VAT/Tax ID, if applicable, are updated. See [Update the billing information](/billing/details/). +- For United States customers, Docker began collecting sales tax on July 1, 2024. +- For European customers, Docker began collecting VAT on March 1, 2025. +- For United Kingdom customers, Docker began collecting VAT on May 1, 2025. -### How do I certify my tax exempt status? +To ensure that tax assessments are correct, make sure that your billing +information and VAT/Tax ID, if applicable, are updated. See +[Update the billing information](/manuals/billing/details.md). -If you're exempt from sales tax, you can [register a valid tax exemption certificate](./tax-certificate.md) with Docker's Support team. [Contact Support](https://hub.docker.com/support/contact) to get started. +If you're exempt from sales tax, see +[Register a tax certificate](/manuals/billing/tax-certificate.md). ### Does Docker offer academic pricing? -Contact the [Docker Sales Team](https://www.docker.com/company/contact). 
+For academic pricing, contact the +[Docker Sales Team](https://www.docker.com/company/contact). + +### Can I use pay by invoice for upgrades or additional seats? -### Do I need to do anything at the end of my subscription term? +No. Pay by invoice is only available for renewing annual subscriptions, not for +purchasing upgrades or additional seats. You must use card payment or US bank +accounts for these changes. -No. All monthly and annual subscriptions are automatically renewed at the end of the term using the original form of payment. \ No newline at end of file +For a list of supported payment methods, see +[Add or update a payment method](/manuals/billing/payment-method.md). diff --git a/content/manuals/billing/history.md b/content/manuals/billing/history.md index 20295cd3d19..66275e45b5e 100644 --- a/content/manuals/billing/history.md +++ b/content/manuals/billing/history.md @@ -1,170 +1,136 @@ --- -title: View billing history +title: Invoices and billing history weight: 40 -description: Discover how to view your billing history in Docker Hub -keywords: payments, billing, subscription, invoices, renewals, invoice management, billing administration +description: Learn how to view invoices and your billing history +keywords: payments, billing, subscription, invoices, renewals, invoice management, billing administration, pay invoice aliases: - - /billing/core-billing/history/ + - /billing/core-billing/history/ --- -In this section, learn how you can view your billing history, manage your invoices, and verify your renewal date. All monthly and annual subscriptions are automatically renewed at the end of the term using the original form of payment. +Learn how to view and pay invoices, view your billing history, and verify +your billing renewal date. All monthly and annual subscriptions are +automatically renewed at the end of the subscription term using your default +payment method. -{{% include "tax-compliance.md" %}} - -## Invoices +## View an invoice Your invoice includes the following: - Invoice number - Date of issue -- Date due +- Due date - Your "Bill to" information - Amount due (in USD) -- Description of your order, quantity if applicable, unit price, and amount (in USD) +- Pay online: Select this link to pay your invoice online +- Description of your order, quantity if applicable, unit price, and + amount (in USD) +- Subtotal, discount (if applicable), and total -The information listed in the **Bill to** section of your invoice is based on your billing information. Not all fields are required. The billing information includes the following: +The information listed in the "Bill to" section of your invoice is based on +your billing information. Not all fields are required. The billing information +includes the following: - Name (required): The name of the administrator or company -- Email address (required): The email address that receives all billing-related emails for the account - Address (required) +- Email address (required): The email address that receives all billing-related + emails for the account - Phone number - Tax ID or VAT -You can’t make changes to a paid or unpaid billing invoice. When you update your billing information, this change won't update an existing invoice. If you need to update your billing information, make sure you do so before your subscription renewal date when your invoice is finalized. For more information, see [Update the billing information](details.md). 
- -### View renewal date - -{{< tabs >}} -{{< tab name="Docker plan" >}} +You can’t make changes to a paid or unpaid billing invoice. When you update +your billing information, this change won't update an existing invoice. -You receive your invoice when the subscription renews. To verify your renewal date, sign in to the [Docker Home Billing](https://app.docker.com/billing). Your renewal date and amount are displayed on your subscription plan card. +If you need +to update your billing information, make sure you do so before your +subscription renewal date when your invoice is finalized. +For more information, see [Update billing information](details.md). -{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} +## Pay an invoice -You receive your invoice when the subscription renews. To verify your renewal date: - -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your user avatar to open the drop-down menu. -3. Select **Billing**. -4. Select the user or organization account to view the billing details. Here you can find your renewal date and the renewal amount. +> [!NOTE] +> +> Pay by invoice is only available for subscribers on an annual billing cycle. +> To change your billing cycle, see [Change your billing cycle](/manuals/billing/cycle.md). -{{< /tab >}} -{{< /tabs >}} +If you've selected pay by invoice for your subscription, you'll receive email +reminders to pay your invoice at 10 days before the due date, on the due date, +and 15 days after the due date. -### Include your VAT number on your invoice +You can pay an invoice from the Docker Billing Console: -> [!NOTE] -> -> If the VAT number field is not available, complete the [Contact Support form](https://hub.docker.com/support/contact/). This field may need to be manually added. +1. Sign in to [Docker Home](https://app.docker.com/) and choose your organization. +1. Select **Billing**. +1. Select **Invoices** and locate the invoice you want to pay. +1. In the **Actions** column, select **Pay invoice**. +1. Fill out your payment details and select **Pay**. -{{< tabs >}} -{{< tab name="Docker plan" >}} +When your payment has processed, the invoice's **Status** column will update to +**Paid** and you will receive a confirmation email. -To add or update your VAT number: +If you choose to pay using a US bank account, you must verify the account. For +more information, see [Verify a bank account](/manuals/billing/payment-method.md#verify-a-bank-account). -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Billing information** from the left-hand menu. -4. Select **Change** on your billing information card. -5. Ensure the **I'm purchasing as a business** checkbox is checked. -6. Enter your VAT number in the Tax ID section. +### View renewal date - > [!IMPORTANT] - > - > Your VAT number must include your country prefix. For example, if you are - entering a VAT number for Germany, you would enter `DE123456789`. +You receive your invoice when the subscription renews. To verify your renewal +date: -7. Select **Update**. +1. Sign in to [Docker Home Billing](https://app.docker.com/billing). +1. Find your renewal date and amount on your subscription plan card. -Your VAT number will be included on your next invoice. +## Include your VAT number on your invoice -{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} +> [!NOTE] +> +> If the VAT number field is not available, complete the +> [Contact Support form](https://hub.docker.com/support/contact/). 
This field +> may need to be manually added. To add or update your VAT number: -1. Sign in to [Docker Hub](https://hub.docker.com). -2. For user accounts, Select your avatar in the top-right corner, then **Billing**. For organizations, select the name of the organization. -3. Select the **Billing address** link. -4. In the **Billing Information** section, select **Update information**. -5. Enter your VAT number in the Tax ID section. +1. Sign in to [Docker Home](https://app.docker.com/) and choose your + organization. +1. Select **Billing**. +1. Select **Billing information** from the left-hand menu. +1. Select **Change** on your billing information card. +1. Ensure the **I'm purchasing as a business** checkbox is checked. +1. Enter your VAT number in the Tax ID section. - > [!IMPORTANT] - > - > Your VAT number must include your country prefix. For example, if you are - entering a VAT number for Germany, you would enter `DE123456789`. + > [!IMPORTANT] + > + > Your VAT number must include your country prefix. For example, if you are + > entering a VAT number for Germany, you would enter `DE123456789`. -6. Select **Save**. +1. Select **Update**. Your VAT number will be included on your next invoice. -{{< /tab >}} -{{< /tabs >}} - ## View billing history -You can view the billing history and download past invoices for a personal account or organization. +You can view your billing history and download past invoices for a personal +account or organization. ### Personal account -{{< tabs >}} -{{< tab name="Docker plan" >}} - To view billing history: -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Invoices** from the left-hand menu. -4. Optional. Select the **Invoice number** to open invoice details. -5. Optional. Select the **Download** button to download an invoice. - -{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} - -To view billing history: - -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select the **Payment methods and billing history** link. - You can find your past invoices in the **Invoice History** section. - -From here you can download an invoice. - -{{< /tab >}} -{{< /tabs >}} +1. Sign in to [Docker Home](https://app.docker.com/) and choose your + organization. +1. Select **Billing**. +1. Select **Invoices** from the left-hand menu. +1. Optional. Select the **Invoice number** to open invoice details. +1. Optional. Select the **Download** button to download an invoice. ### Organization -> [!NOTE] -> -> You must be an owner of the organization to view the billing history. - -{{< tabs >}} -{{< tab name="Docker plan" >}} +You must be an owner of the organization to view the billing history. To view billing history: -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Invoices** from the left-hand menu. -4. Optional. Select the **invoice number** to open invoice details. -5. Optional. Select the **download** button to download an invoice. - -{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} - -To view billing history: - -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select the **Payment methods and billing history** link. - You can find your past invoices in the **Invoice History** section. 
- -From here you can download an invoice. - -{{< /tab >}} -{{< /tabs >}} +1. Sign in to [Docker Home](https://app.docker.com/) and select your + organization. +1. Select **Billing**. +1. Select **Invoices** from the left-hand menu. +1. Optional. Select the **invoice number** to open invoice details. +1. Optional. Select the **download** button to download an invoice. diff --git a/content/manuals/billing/payment-method.md b/content/manuals/billing/payment-method.md index f4c2ad3e2a9..8e11fbbf3e2 100644 --- a/content/manuals/billing/payment-method.md +++ b/content/manuals/billing/payment-method.md @@ -2,162 +2,98 @@ title: Add or update a payment method weight: 20 description: Learn how to add or update a payment method in Docker Hub -keywords: payments, billing, subscription, supported payment methods, failed payments, coupons -alisases: - - /billing/core-billing/payment-method/ +keywords: payments, billing, subscription, supported payment methods, failed payments, add credit card, bank transfer, Stripe Link, payment failure +aliases: + - /billing/core-billing/payment-method/ --- -This page describes how to add or update a payment method for your personal account or for an organization. +Docker supports different payment methods for your paid personal +account or organization. This page describes supported payment types, how to make payments from [Docker Home](https://app.docker.com/), and how to set up pay by invoice. -You can add a payment method or update your account's existing payment method at any time. +## Supported payment types -> [!IMPORTANT] -> -> If you want to remove all payment methods, you must first downgrade your subscription to a free plan. See [Downgrade](../subscription/change.md). +You can add a payment method or update your account's existing payment method +at any time. All charges are in United States dollars (USD). The following payment methods are supported: -The following payment methods are supported: +| Category | Payment type | +| ------------- | ----------------------------------------------------------------------- | +| Cards | Visa, MasterCard, American Express, Discover, JCB, Diners, UnionPay | +| Wallets | Stripe Link | +| Bank accounts | Automated Clearing House (ACH) transfer with a verified US bank account | -- Cards - - Visa - - MasterCard - - American Express - - Discover - - JCB - - Diners - - UnionPay -- Wallets - - Stripe Link -- Bank accounts - - ACH transfer with a [verified](manuals/billing/payment-method.md#verify-a-bank-account) US bank account +## Prerequisites -All currency, for example the amount listed on your billing invoice, is in United States dollar (USD). +Certain payment methods require additional steps before selecting them as a payment method: -{{% include "tax-compliance.md" %}} +- You must [verify a bank account](/manuals/billing/payment-method.md#verify-a-bank-account) before choosing a bank account. +- You must have a Docker Business or Docker Team plan to [pay by invoice](/manuals/billing/payment-method.md#enable-and-disable-pay-by-invoice). +- You must be an existing Stripe Link customer, or fill out the card information form to use Link payments. ## Manage payment method -### Personal account +Paid personal accounts and organizations follow the same procedures to add, update, or remove payment methods. -{{< tabs >}} -{{< tab name="Docker plan" >}} - -To add a payment method: +### Add payment method 1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. 
Select **Payment methods** from the left-hand menu. -4. Select **Add payment method**. -5. Enter your new payment information: - - If you are adding a card: - - Select **Card** and fill out the card information form. - - If you are adding a Link payment: - - Select **Secure, 1-click checkout with Link** and enter your Link **email address** and **phone number**. - - If you are not an existing Link customer, you must fill out the card information form to store a card for Link payments. - - If you are adding a bank account: - - Select **US bank account**. - - Verify your **Email** and **Full name**. - - If your bank is listed, select your bank's name. - - If your bank is not listed, select **Search for your bank**. - - To verify your bank account, see [Verify a bank account](manuals/billing/payment-method.md#verify-a-bank-account). -6. Select **Add payment method**. -7. Optional. You can set a new default payment method by selecting the **Set as default** action. -8. Optional. You can remove non-default payment methods by selecting the **Delete** action. - -> [!NOTE] -> -> If you want to set a US bank account as your default payment method, you must -> verify the account first. - -{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} - -To add a payment method: - -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select the **Payment methods and billing history** link. -5. In the **Payment method** section, select **Add payment method**. -6. Enter your new payment information: - - If you are adding a card: - - Select **Card** and fill out the card information form. - - If you are adding a Link payment: - - Select **Secure, 1-click checkout with Link** and enter your Link **email address** and **phone number**. - - If you are not an existing Link customer, you must fill out the card information form to store a card for Link payments. -7. Select **Add**. -8. Select the **Actions** icon, then select **Make default** to ensure that your new payment method applies to all purchases and subscriptions. -9. Optional. You can remove non-default payment methods by selecting the **Actions** icon. Then, select **Delete**. - -{{< /tab >}} -{{< /tabs >}} - -### Organization - -> [!NOTE] -> -> You must be an organization owner to make changes to the payment information. - -{{< tabs >}} -{{< tab name="Docker plan" >}} - -To add a payment method: +1. Select your account name for personal accounts, or select your organization name for organization accounts. +1. Select **Billing**, then **Payment methods**. +1. Select **Add payment method** and enter your new payment information: + - For first time setup, fill in your billing information. + - To purchase as a business, provide your tax ID. +1. Choose to add a card, a US bank account, or a Link payment. + - To pay with card, fill out the card information form. + - To pay with a US bank account: + - Verify your **Email** and **Full name**. + - If your bank is listed, select your bank's name. + - If your bank is not listed, select **Search for your bank**. + - To pay through Link, select an existing payment and choose **Use this card**. +1. Finish adding the payment method by selecting **Add payment method**. -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Choose your organization from the top-left drop-down. -4. Select **Payment methods** from the left-hand menu. -5. 
Select **Add payment method**. -6. Enter your new payment information: - - If you are adding a card: - - Select **Card** and fill out the card information form. - - If you are adding a Link payment: - - Select **Secure, 1-click checkout with Link** and enter your Link **email address** and **phone number**. - - If you are not an existing Link customer, you must fill out the card information form to store a card for Link payments. - - If you are adding a bank account: - - Select **US bank account**. - - Verify your **Email** and **Full name**. - - If your bank is listed, select your bank's name. - - If your bank is not listed, select **Search for your bank**. - - To verify your bank account, see [Verify a bank account](manuals/billing/payment-method.md#verify-a-bank-account). -7. Select **Add payment method**. -8. Select **Add payment method**. -9. Optional. You can set a new default payment method by selecting the **Set as default** action. -10. Optional. You can remove non-default payment methods by selecting the **Delete** action. - -> [!NOTE] -> -> If you want to set a US bank account as your default payment method, you must -> verify the account first. - -{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} - -To add a payment method: - -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select the organization account you want to update. -5. Select the **Payment methods and billing history** link. -6. In the **Payment Method** section, select **Add payment method**. -7. Enter your new payment information: - - If you are adding a card: - - Select **Card** and fill out the card information form. - - If you are adding a Link payment: - - Select **Secure, 1-click checkout with Link** and enter your Link **email address** and **phone number**. - - If you are not an existing Link customer, you must fill out the card information form to store a card for Link payments. -8. Select **Add payment method**. -9. Select the **Actions** icon, then select **Make default** to ensure that your new payment method applies to all purchases and subscriptions. -10. Optional. You can remove non-default payment methods by selecting the **Actions** icon. Then, select **Delete**. - -{{< /tab >}} -{{< /tabs >}} +### Set default payment method + +After adding one or more payment methods, you can set one as a default method. + +1. From **Billing**, go to **Payment methods**. +1. Find the payment method you want to set as default from the **Payment method** table. +1. Select the three dots, then choose **Set as default**. + +### Remove payment method + +To remove a single payment method: + +1. From **Billing**, go to **Payment methods**. +1. Find the payment method you want to remove from the **Payment method** table. +1. Select the three dots, then choose **Remove**. + +To remove your default payment method, first set a different payment method as default, or [downgrade to a free subscription](/manuals/subscription/change.md). + +## Enable and disable pay by invoice + +> [!TIP] +> Do you need to pay by invoice? [Upgrade to a Docker Business or Docker Team plan](https://www.docker.com/pricing?ref=Docs&refAction=DocsBillingPaymentMethod) and choose the annual subscription. + +Pay by invoice requires you to pay upfront for your first subscription period using a payment card or ACH bank transfer. At renewal time, instead of automatic payment, you'll receive an invoice via +email that you must pay manually. 
+ +Follow these steps to enable or disable pay by invoice: + +1. Sign in to [Docker Home](https://app.docker.com/) and select your + organization. +2. Select **Billing**, then **Payment methods**. +3. Select **Pay by invoice**, then select the pay by invoice toggle to enable or disable. +4. Confirm your billing contact details. If you need to change them, select + **Change** and enter your new details. + +Pay by invoice is not available for +subscription upgrades or changes. ## Verify a bank account There are two ways to verify a bank account as a payment method: -- Instant verification: Docker supports several major banks for instant verification. +- Instant verification: Docker supports several major banks for instant + verification. - Manual verification: All other banks must be verified manually. ### Instant verification @@ -166,54 +102,49 @@ To verify your bank account instantly, you must sign in to your bank account from the Docker billing flow: 1. Choose **US bank account** as your payment method. -2. Verify your **Email** and **Full name**. -3. If your bank is listed, select your bank's name or select **Search for your bank**. -4. Sign in to your bank and review the terms and conditions. This agreement -allows Docker to debit payments from your connected bank account. -5. Select **Agree and continue**. -6. Select an account to link and verify, and select **Connect account**. +1. Verify your **Email** and **Full name**. +1. If your bank is listed, select your bank's name or + select **Search for your bank**. +1. Sign in to your bank and review the terms and conditions. This agreement + allows Docker to debit payments from your connected bank account. +1. Select **Agree and continue**. +1. Select an account to link and verify, and select **Connect account**. -When the account is verified, you will see a success message in the pop-up modal. +When the account is verified, you will see a success message in the pop-up +modal. ### Manual verification -To verify your bank account manually, you must enter the micro-deposit amount from your bank statement: +To verify your bank account manually, you must enter the micro-deposit amount +from your bank statement: 1. Choose **US bank account** as your payment method. -2. Verify your **Email** and **First and last name**. -3. Select **Enter bank details manually instead**. -4. Enter your bank details: **Routing number** and **Account number**. -5. Select **Submit**. -6. You will receive an email with instructions on how to manually verify. +1. Verify your **Email** and **First and last name**. +1. Select **Enter bank details manually instead**. +1. Enter your bank details: **Routing number** and **Account number**. +1. Select **Submit**. +1. You will receive an email with instructions on how to manually verify. -Manual verification uses micro-deposits. You should see a small deposit -(e.g. $-0.01) in your bank account in 1-2 business days. Open your manual verification email and enter the amount of this deposit to verify your account. +Manual verification uses micro-deposits. You’ll see a small deposit +(such as $0.01) in your bank account within 1–2 business days. Open your manual +verification email and enter the amount of this deposit to verify your account. ## Failed payments -> [!NOTE] -> -> You can't manually retry a failed payment. Docker will retry failed payments -based on the retry schedule. +If your payment fails, select **Pay now**. This redirects you from Docker Hub so you can manually retry the payment through Stripe. 
-If your subscription payment fails, there is a grace period of 15 days, including the due date. Docker retries to collect the payment 3 times using the following schedule:
+If your payment fails, you have a grace period of 15 days, including the due
+date. Docker retries to collect the payment 3 times using the
+following schedule:
 
 - 3 days after the due date
 - 5 days after the previous attempt
 - 7 days after the previous attempt
 
-Docker also sends an email notification `Action Required - Credit Card Payment Failed` with an attached unpaid invoice after each failed payment attempt.
-
-Once the grace period is over and the invoice is still not paid, the subscription downgrades to a free plan and all paid features are disabled.
-
-## Redeem a coupon
-
-You can redeem a coupon for any paid Docker subscription.
-
-A coupon can be used when you:
-- Sign up to a new paid subscription from a free subscription
-- Upgrade an existing paid subscription
-
-You are asked to enter your coupon code when you confirm or enter your payment method.
+Docker also sends an email notification
+`Action Required - Credit Card Payment Failed` with an attached unpaid invoice
+after each failed payment attempt.
 
-If you use a coupon to pay for a subscription, when the coupon expires, your payment method is charged the full cost of your subscription. If you don't have a saved payment method, your account downgrades to a free subscription.
+Once the grace period is over and the invoice is still not paid, the
+subscription downgrades to a free subscription and all paid features are
+disabled.
diff --git a/content/manuals/billing/tax-certificate.md b/content/manuals/billing/tax-certificate.md
index 2d3ba9891ed..246a12e4bbb 100644
--- a/content/manuals/billing/tax-certificate.md
+++ b/content/manuals/billing/tax-certificate.md
@@ -1,42 +1,52 @@
 ---
-title: Register a tax certificate
-description: Learn how to submit a tax exemption certificate for your Docker billing.
-keywords: billing, renewal, payments, tax
+title: Submit a tax exemption certificate
+description: Learn how to submit a tax exemption or VAT certificate for Docker billing.
+keywords: billing, renewal, payments, tax, exemption, VAT, billing support, Docker billing
 weight: 50
 ---
 
-If you're a customer in the United States and you're exempt from sales tax, you can register a valid tax exemption certificate with Docker's Support team. If you're a global customer subject to VAT, make sure that you provide your [VAT number](/billing/history/#include-your-vat-number-on-your-invoice) including your VAT country prefix.
+If you're a customer in the United States and are exempt from sales tax, you
+can submit a valid tax exemption certificate to Docker Support.
 
-{{% include "tax-compliance.md" %}}
+If you're a global customer subject to VAT, make sure to include your
+[VAT number](/manuals/billing/history.md#include-your-vat-number-on-your-invoice)
+along with your country prefix when you update your billing profile.
 
 ## Prerequisites
 
-Before you submit your tax exemption certificate, ensure the following.
+Before submitting your certificate:
 
-1. Your customer name matches the name on the exemption certificate
-2. Your tax exemption certificate specifies Docker Inc as the Seller or Vendor and all applicable information is filled out
-3. Your certificate is signed and dated, and the expiration date hasn't passed
-4. 
You have a valid Docker ID/namespace(s) of the accounts that you want to apply the tax exemption certificate to +- The customer name must match the name on the certificate. +- The certificate must list Docker Inc. as the Seller or Vendor, with all + relevant fields completed. +- The certificate must be signed, dated, and not expired. +- You must include the Docker ID or namespace(s) for all accounts to + apply the certificate to. + +> [!IMPORTANT] +> +> You can use the same certificate for multiple namespaces, if applicable. ## Contact information -You can use the following for Docker's contact information on your tax exemption certificate. +Use the following contact information on your certificate: -Docker, Inc. -3790 El Camino Real #1052 -Palo Alto, CA 94306 +Docker, Inc. +3790 El Camino Real #1052 +Palo Alto, CA 94306 (415) 941-0376 ## Register a tax certificate 1. [Submit a Docker Support ticket](https://hub.docker.com/support/contact?topic=Billing&subtopic=Tax%20information) to initiate the process to register a tax certificate. -2. Enter the required information. -3. In the **Additional Information** field, list the Docker ID/namespace(s) of the accounts that you want to apply the tax exemption certificate to. - - > [!TIP] - > - > You can list multiple namespaces that share the same tax exemption certificate, if applicable. -4. Add the tax certificate from your system by dragging and dropping them onto the file area, or select the **Browse Files** button to open a file dialog. -5. Select **Submit**. - -Docker's support team will reach out to you if any additional information is required. You'll receive an e-mail confirmation from Docker once your tax exemption status is applied to your account. +1. Enter **Tax certificate** as the support ticket **Subject**. +1. In the **Details** field, enter **Submitting a tax certificate**. +1. Instructions will populate on how to submit a tax certificate. +1. Fill out all required fields on the support form. +1. In the file upload section, add the tax certificate by dragging and dropping + the file, or selecting **Browse files**. +1. Select **Submit**. + +Docker's support team will reach out to you if any additional information is +required. You'll receive an e-mail confirmation from Docker once your tax +exemption status is applied to your account. diff --git a/content/manuals/build-cloud/_index.md b/content/manuals/build-cloud/_index.md index f07c4bc1ed4..85a2308b52b 100644 --- a/content/manuals/build-cloud/_index.md +++ b/content/manuals/build-cloud/_index.md @@ -5,7 +5,7 @@ description: Find documentation on Docker Build Cloud to help you build your con keywords: build, cloud, cloud build, remote builder params: sidebar: - group: Products + group: Application development aliases: - /build/cloud/faq/ - /build/cloud/ @@ -47,12 +47,7 @@ cache, and encryption in transit. That means there are no shared processes or data between cloud builders. > [!NOTE] -> -> Docker Build Cloud is currently only available in the US East region. Users -> in Europe and Asia may experience increased latency compared to users based -> in North America. -> -> Support for multi-region builders is on the roadmap. +> Docker Build Cloud is only available in the US East region. ## Get Docker Build Cloud @@ -63,10 +58,10 @@ to get access to Docker Build Cloud: - Users with a free Personal account can opt-in to a 7-day free trial, with the option to subscribe for access. 
To start your free trial, sign in to [Docker Build Cloud Dashboard](https://app.docker.com/build/) and follow the on-screen instructions. - All users with a paid Docker subscription have access to Docker Build Cloud included -with their Docker suite of products. See [Docker subscriptions and features](/manuals/subscription/details.md) for more information. +with their Docker suite of products. See [Docker subscriptions and features](https://www.docker.com/pricing?ref=Docs&refAction=DocsBuildCloud) for more information. Once you've signed up and created a builder, continue by [setting up the builder in your local environment](./setup.md). For information about roles and permissions related to Docker Build Cloud, see -[Roles and Permissions](/manuals/security/for-admins/roles-and-permissions.md#docker-build-cloud-permissions). +[Roles and Permissions](/manuals/enterprise/security/roles-and-permissions.md#docker-build-cloud-permissions). diff --git a/content/manuals/build-cloud/builder-settings.md b/content/manuals/build-cloud/builder-settings.md index a77402c7c45..fdae2806dbf 100644 --- a/content/manuals/build-cloud/builder-settings.md +++ b/content/manuals/build-cloud/builder-settings.md @@ -6,17 +6,53 @@ keywords: build, cloud build, optimize, remote, local, cloud, registry, package The **Builder settings** page in Docker Build Cloud lets you configure disk allocation, private resource access, and firewall settings for your cloud builders in your organization. These configurations help optimize storage, enable access to private registries, and secure outbound network traffic. -## Disk allocation +## Storage and cache management -The **Disk allocation** setting lets you control how much of the available storage is dedicated to the build cache. A lower allocation increases storage available for active builds. +### Disk allocation -To make disk allocation changes, navigate to **Builder settings** in Docker Build Cloud and then adjust the **Disk allocation** slider to specify the percentage of storage used for build caching. +The **Disk allocation** setting lets you control how much of the available +storage is dedicated to the build cache. A lower allocation increases +storage available for active builds. + +To make disk allocation changes, navigate to **Builder settings** in Docker +Build Cloud and then adjust the **Disk allocation** slider to specify the +percentage of storage used for build caching. Any changes take effect immediately. +### Build cache space + +Your subscription includes the following Build cache space: + +| Subscription | Build cache space | +|--------------|-------------------| +| Personal | N/A | +| Pro | 50GB | +| Team | 100GB | +| Business | 200GB | + +### Multi-architecture storage allocation + +Docker Build Cloud automatically provisions builders for both amd64 and arm64 architectures. Your total build cache space is split equally between these +two builders: + +- Pro (50GB total): 25GB for amd64 builder + 25GB for arm64 builder +- Team (100GB total): 50GB for amd64 builder + 50GB for arm64 builder +- Business (200GB total): 100GB for amd64 builder + 100GB for arm64 builder + +> [!IMPORTANT] +> +> If you only build for one architecture, be aware that your effective cache +space is half of your subscription's total allocation. + +### Get more build cache space + +To get more Build cache space, [upgrade your subscription](/manuals/subscription/scale.md). + > [!TIP] -> -> If you build very large images, consider allocating less storage for caching. 
+> +> If you build large images, consider allocating less storage for caching to +leave more space for active builds. ## Private resource access @@ -26,7 +62,7 @@ For example, if your organization hosts a private [PyPI](https://pypi.org/) repo To enable your cloud builders to access your private resources, enter the host name and port of your private resource and then select **Add**. -### Authentication +### Authentication If your internal artifacts require authentication, make sure that you authenticate with the repository either before or during the build. For @@ -34,10 +70,9 @@ internal package repositories for npm or PyPI, use [build secrets](/manuals/buil to authenticate during the build. For internal OCI registries, use `docker login` to authenticate before building. -Note that if you use a private registry that requires authentication, you will -need to authenticate with `docker login` twice before building. This is because -the cloud builder needs to authenticate with Docker to use the cloud builder, -and then again to authenticate with the private registry. +If you use a private registry that requires authentication, you need to +authenticate twice before building: once to Docker Hub (to access Docker Build +Cloud), and once to your private registry (to push/pull images). ```console $ echo $DOCKER_PAT | docker login docker.io -u --password-stdin @@ -50,7 +85,5 @@ $ docker build --builder --tag registry.example.com/ --pu Firewall settings let you restrict cloud builder egress traffic to specific IP addresses. This helps enhance security by limiting external network egress from the builder. 1. Select the **Enable firewall: Restrict cloud builder egress to specific public IP address** checkbox. - 2. Enter the IP address you want to allow. - 3. Select **Add** to apply the restriction. diff --git a/content/manuals/build-cloud/ci.md b/content/manuals/build-cloud/ci.md index f5193138183..1087860c531 100644 --- a/content/manuals/build-cloud/ci.md +++ b/content/manuals/build-cloud/ci.md @@ -36,8 +36,8 @@ See [Loading build results](./usage/#loading-build-results) for details. To enable your CI/CD system to build and push images using Docker Build Cloud, provide both an access token and a username. The type of token and the username you use depend on your account type and permissions. -- If you are an organization administrator or have permission to create [organization access tokens (OAT)](../security/for-admins/access-tokens.md), use an OAT and set `DOCKER_USER` to your Docker Hub organization name. -- If you do not have permission to create OATs or are using a personal account, use a [personal access token (PAT)](/security/for-developers/access-tokens/) and set `DOCKER_USER` to your Docker Hub username. +- If you are an organization administrator or have permission to create [organization access tokens (OAT)](/manuals/enterprise/security/access-tokens.md), use an OAT and set `DOCKER_ACCOUNT` to your Docker Hub organization name. +- If you do not have permission to create OATs or are using a personal account, use a [personal access token (PAT)](/security/access-tokens/) and set `DOCKER_ACCOUNT` to your Docker Hub username. ### Creating access tokens @@ -45,14 +45,13 @@ To enable your CI/CD system to build and push images using Docker Build Cloud, p If you are an organization administrator: -1. 
Create an [organization access token (OAT)](../security/for-admins/access-tokens.md): - - The token must have these permissions: - - **cloud-connect** scope - - **Read public repositories** permission - - **Repository access** with **Image push** permission for the target repository: - - Expand the **Repository** drop-down. - - Select **Add repository** and choose your target repository. - - Set the **Image push** permission for the repository. +- Create an [organization access token (OAT)](/manuals/enterprise/security/access-tokens.md). The token must have these permissions: + 1. **cloud-connect** scope + 2. **Read public repositories** permission + 3. **Repository access** with **Image push** permission for the target repository: + - Expand the **Repository** drop-down. + - Select **Add repository** and choose your target repository. + - Set the **Image push** permission for the repository. If you are not an organization administrator: @@ -60,18 +59,19 @@ If you are not an organization administrator: #### For personal accounts -1. Create a [personal access token (PAT)](/security/for-developers/access-tokens/): - - Create a new token with **Read & write** access. - - Note: Building with Docker Build Cloud only requires read access, but you need write access to push images to a Docker Hub repository. +- Create a [personal access token (PAT)](/security/access-tokens/) with the following permissions: + 1. **Read & write** access. + - Note: Building with Docker Build Cloud only requires read access, but you need write access to push images to a Docker Hub repository. ## CI platform examples > [!NOTE] > -> In your CI/CD configuration, set the following variables: -> - `DOCKER_PAT` — your access token (PAT or OAT) -> - `DOCKER_USER` — your Docker Hub username (for PAT) or organization name (for OAT) +> In your CI/CD configuration, set the following variables/secrets: +> - `DOCKER_ACCESS_TOKEN` — your access token (PAT or OAT). Use a secret to store the token. +> - `DOCKER_ACCOUNT` — your Docker Hub organization name (for OAT) or username (for PAT) +> - `CLOUD_BUILDER_NAME` — the name of the cloud builder you created in the [Docker Build Cloud Dashboard](https://app.docker.com/build/) > > This ensures your builds authenticate correctly with Docker Build Cloud. @@ -90,27 +90,51 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: - username: ${{ vars.DOCKER_USER }} - password: ${{ secrets.DOCKER_PAT }} + username: ${{ vars.DOCKER_ACCOUNT }} + password: ${{ secrets.DOCKER_ACCESS_TOKEN }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: driver: cloud - endpoint: "/default" - install: true + endpoint: "${{ vars.DOCKER_ACCOUNT }}/${{ vars.CLOUD_BUILDER_NAME }}" # for example, "acme/default" - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: - tags: "" + tags: "" # for example, "acme/my-image:latest" # For pull requests, export results to the build cache. # Otherwise, push to a registry. outputs: ${{ github.event_name == 'pull_request' && 'type=cacheonly' || 'type=registry' }} ``` +The example above uses `docker/build-push-action`, which automatically uses the +builder set up by `setup-buildx-action`. 
If you need to use the `docker build` +command directly instead, you have two options: + +- Use `docker buildx build` instead of `docker build` +- Set the `BUILDX_BUILDER` environment variable to use the cloud builder: + + ```yaml + - name: Set up Docker Buildx + id: builder + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} + with: + driver: cloud + endpoint: "${{ vars.DOCKER_ACCOUNT }}/${{ vars.CLOUD_BUILDER_NAME }}" + + - name: Build + run: | + docker build . + env: + BUILDX_BUILDER: ${{ steps.builder.outputs.name }} + ``` + +For more information about the `BUILDX_BUILDER` environment variable, see +[Build variables](/manuals/build/building/variables.md#buildx_builder). + ### GitLab ```yaml @@ -120,7 +144,7 @@ default: - docker:24-dind before_script: - docker info - - echo "$DOCKER_PAT" | docker login --username "$DOCKER_USER" --password-stdin + - echo "$DOCKER_ACCESS_TOKEN" | docker login --username "$DOCKER_ACCOUNT" --password-stdin - | apk add curl jq ARCH=${CI_RUNNER_EXECUTABLE_ARCH#*/} @@ -128,11 +152,12 @@ default: mkdir -vp ~/.docker/cli-plugins/ curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL chmod a+x ~/.docker/cli-plugins/docker-buildx - - docker buildx create --use --driver cloud ${DOCKER_ORG}/default + - docker buildx create --use --driver cloud ${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME} variables: IMAGE_NAME: - DOCKER_ORG: + DOCKER_ACCOUNT: # your Docker Hub organization name (or username when using a personal account) + CLOUD_BUILDER_NAME: # the name of the cloud builder you created in the [Docker Build Cloud Dashboard](https://app.docker.com/build/) # Build multi-platform image and push to a registry build_push: @@ -176,8 +201,8 @@ jobs: curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL chmod a+x ~/.docker/cli-plugins/docker-buildx - - run: echo "$DOCKER_PAT" | docker login --username $DOCKER_USER --password-stdin - - run: docker buildx create --use --driver cloud "/default" + - run: echo "$DOCKER_ACCESS_TOKEN" | docker login --username $DOCKER_ --password-stdin + - run: docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}" - run: | docker buildx build \ @@ -199,8 +224,8 @@ jobs: curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL chmod a+x ~/.docker/cli-plugins/docker-buildx - - run: echo "$DOCKER_PAT" | docker login --username $DOCKER_USER --password-stdin - - run: docker buildx create --use --driver cloud "/default" + - run: echo "$DOCKER_ACCESS_TOKEN" | docker login --username $DOCKER_ --password-stdin + - run: docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}" - run: | docker buildx build \ @@ -231,7 +256,7 @@ Add the following `environment` hook agent's hook directory: set -euo pipefail if [[ "$BUILDKITE_PIPELINE_NAME" == "build-push-docker" ]]; then - export DOCKER_PAT="" + export DOCKER_ACCESS_TOKEN="" fi ``` @@ -239,7 +264,8 @@ Create a `pipeline.yml` that uses the `docker-login` plugin: ```yaml env: - DOCKER_ORG: + DOCKER_ACCOUNT: # your Docker Hub organization name (or username when using a personal account) + CLOUD_BUILDER_NAME: # the name of the cloud builder you created in the [Docker Build Cloud Dashboard](https://app.docker.com/build/) IMAGE_NAME: steps: @@ -247,8 +273,8 @@ steps: key: build-push plugins: - docker-login#v2.1.0: - username: - password-env: DOCKER_PAT # the variable name in the environment hook + username: DOCKER_ACCOUNT + password-env: DOCKER_ACCESS_TOKEN # the variable name in the 
environment hook ``` Create the `build.sh` script: @@ -277,7 +303,7 @@ curl --silent -L --output $DOCKER_DIR/cli-plugins/docker-buildx $BUILDX_URL chmod a+x ~/.docker/cli-plugins/docker-buildx # Connect to your builder and set it as the default builder -docker buildx create --use --driver cloud "$DOCKER_ORG/default" +docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}" # Cache-only image build docker buildx build \ @@ -302,9 +328,9 @@ pipeline { environment { ARCH = 'amd64' - DOCKER_PAT = credentials('docker-personal-access-token') - DOCKER_USER = credentials('docker-username') - DOCKER_ORG = '' + DOCKER_ACCESS_TOKEN = credentials('docker-access-token') + DOCKER_ACCOUNT = credentials('docker-account') + CLOUD_BUILDER_NAME = '' IMAGE_NAME = '' } @@ -317,8 +343,8 @@ pipeline { sh 'mkdir -vp ~/.docker/cli-plugins/' sh 'curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL' sh 'chmod a+x ~/.docker/cli-plugins/docker-buildx' - sh 'echo "$DOCKER_PAT" | docker login --username $DOCKER_USER --password-stdin' - sh 'docker buildx create --use --driver cloud "$DOCKER_ORG/default"' + sh 'echo "$DOCKER_ACCESS_TOKEN" | docker login --username $DOCKER_ACCOUNT --password-stdin' + sh 'docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}"' // Cache-only build sh 'docker buildx build --platform linux/amd64,linux/arm64 --tag "$IMAGE_NAME" --output type=cacheonly .' // Build and push a multi-platform image @@ -340,10 +366,10 @@ services: env: global: - - IMAGE_NAME=username/repo + - IMAGE_NAME= # for example, "acme/my-image:latest" before_install: | - echo "$DOCKER_PAT" | docker login --username "$DOCKER_USER" --password-stdin + echo "$DOCKER_ACCESS_TOKEN" | docker login --username "$DOCKER_ACCOUNT" --password-stdin install: | set -e @@ -351,7 +377,7 @@ install: | mkdir -vp ~/.docker/cli-plugins/ curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL chmod a+x ~/.docker/cli-plugins/docker-buildx - docker buildx create --use --driver cloud "/default" + docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}" script: | docker buildx build \ @@ -363,9 +389,8 @@ script: | ### BitBucket Pipelines ```yaml -# Prerequisites: $DOCKER_USER, $DOCKER_PAT setup as deployment variables +# Prerequisites: $DOCKER_ACCOUNT, $CLOUD_BUILDER_NAME, $DOCKER_ACCESS_TOKEN setup as deployment variables # This pipeline assumes $BITBUCKET_REPO_SLUG as the image name -# Replace in the `docker buildx create` command with your Docker org image: atlassian/default-image:3 @@ -379,8 +404,8 @@ pipelines: - BUILDX_URL=$(curl -s https://raw.githubusercontent.com/docker/actions-toolkit/main/.github/buildx-lab-releases.json | jq -r ".latest.assets[] | select(endswith(\"linux-$ARCH\"))") - curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL - chmod a+x ~/.docker/cli-plugins/docker-buildx - - echo "$DOCKER_PAT" | docker login --username $DOCKER_USER --password-stdin - - docker buildx create --use --driver cloud "/default" + - echo "$DOCKER_ACCESS_TOKEN" | docker login --username $DOCKER_ACCOUNT --password-stdin + - docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}" - IMAGE_NAME=$BITBUCKET_REPO_SLUG - docker buildx build --platform linux/amd64,linux/arm64 @@ -404,11 +429,11 @@ mkdir -vp ~/.docker/cli-plugins/ curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL chmod a+x ~/.docker/cli-plugins/docker-buildx -# Login to Docker Hub. 
For security reasons $DOCKER_PAT should be a Personal Access Token. See https://docs.docker.com/build-cloud/ci/#creating-access-tokens -echo "$DOCKER_PAT" | docker login --username $DOCKER_USER --password-stdin +# Login to Docker Hub with an access token. See https://docs.docker.com/build-cloud/ci/#creating-access-tokens +echo "$DOCKER_ACCESS_TOKEN" | docker login --username $DOCKER_ACCOUNT --password-stdin # Connect to your builder and set it as the default builder -docker buildx create --use --driver cloud "/default" +docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}" # Cache-only image build docker buildx build \ @@ -449,11 +474,11 @@ curl --silent -L --output ~/.docker/cli-plugins/docker-compose $COMPOSE_URL chmod a+x ~/.docker/cli-plugins/docker-buildx chmod a+x ~/.docker/cli-plugins/docker-compose -# Login to Docker Hub. For security reasons $DOCKER_PAT should be a Personal Access Token. See https://docs.docker.com/build-cloud/ci/#creating-access-tokens -echo "$DOCKER_PAT" | docker login --username $DOCKER_USER --password-stdin +# Login to Docker Hub with an access token. See https://docs.docker.com/build-cloud/ci/#creating-access-tokens +echo "$DOCKER_ACCESS_TOKEN" | docker login --username $DOCKER_ACCOUNT --password-stdin # Connect to your builder and set it as the default builder -docker buildx create --use --driver cloud "/default" +docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}" # Build the image build docker compose build diff --git a/content/manuals/build-cloud/setup.md b/content/manuals/build-cloud/setup.md index 57c2e366314..56f78180c42 100644 --- a/content/manuals/build-cloud/setup.md +++ b/content/manuals/build-cloud/setup.md @@ -16,7 +16,8 @@ environment. To get started with Docker Build Cloud, you need to: - Download and install Docker Desktop version 4.26.0 or later. -- Sign up for a Docker Build Cloud subscription in the [Docker Build Cloud Dashboard](https://app.docker.com/build/). +- Create a cloud builder on the [Docker Build Cloud Dashboard](https://app.docker.com/build/). + - When you create the builder, choose a name for it (for example, `default`). You will use this name as `BUILDER_NAME` in the CLI steps below. ### Use Docker Build Cloud without Docker Desktop @@ -50,9 +51,17 @@ command, or using the Docker Desktop settings GUI. $ docker buildx create --driver cloud / ``` - Replace `ORG` with the Docker Hub namespace of your Docker organization. + Replace `` with the Docker Hub namespace of your Docker organization (or your username if you are using a personal account), and `` with the name you chose when creating the builder in the dashboard. + + This creates a local instance of the cloud builder named `cloud-ORG-BUILDER_NAME`. + + > [!NOTE] + > + > If your organization is `acme` and you named your builder `default`, use: + > ```console + > $ docker buildx create --driver cloud acme/default + > ``` -This creates a builder named `cloud-ORG-BUILDER_NAME`. {{< /tab >}} {{< tab name="Docker Desktop" >}} diff --git a/content/manuals/build-cloud/usage.md b/content/manuals/build-cloud/usage.md index 891bb7df9cf..a6970aeceb1 100644 --- a/content/manuals/build-cloud/usage.md +++ b/content/manuals/build-cloud/usage.md @@ -45,11 +45,11 @@ Changing your default builder with `docker buildx use` only changes the default builder for the `docker buildx build` command. The `docker build` command still uses the `default` builder, unless you specify the `--builder` flag explicitly. 
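+For example, assuming a cloud builder that shows up as `cloud-acme-default` in
+`docker buildx ls` (substitute your own builder name), either of the following
+sends a plain `docker build` invocation to that builder:
+
+```console
+$ docker build --builder cloud-acme-default --tag myimage:latest .
+$ BUILDX_BUILDER=cloud-acme-default docker build --tag myimage:latest .
+```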
-If you use build scripts, such as `make`, we recommend that you update your -build commands from `docker build` to `docker buildx build`, to avoid any -confusion with regards to builder selection. Alternatively, you can run `docker -buildx install` to make the default `docker build` command behave like `docker -buildx build`, without discrepancies. +If you use build scripts, such as `make`, that use the `docker build` command, +we recommend updating your build commands to `docker buildx build`. Alternatively, +you can set the [`BUILDX_BUILDER` environment +variable](/manuals/build/building/variables.md#buildx_builder) to specify which +builder `docker build` should use. ## Use with Docker Compose @@ -144,7 +144,7 @@ The traffic is encrypted and secrets are never stored in the build cache. > > If you're misusing build arguments to pass credentials, authentication > tokens, or other secrets, you should refactor your build to pass the secrets using -> [secret mounts](/reference/cli/docker/buildx/build.md#secret) instead. +> [secret mounts](/reference/cli/docker/buildx/build/#secret) instead. > Build arguments are stored in the cache and their values are exposed through attestations. > Secret mounts don't leak outside of the build and are never included in attestations. diff --git a/content/manuals/build/_index.md b/content/manuals/build/_index.md index c4af3ceb1af..a496238071a 100644 --- a/content/manuals/build/_index.md +++ b/content/manuals/build/_index.md @@ -5,7 +5,7 @@ description: Get an overview of Docker Build to package and bundle your code and keywords: build, buildx, buildkit params: sidebar: - group: Open source + group: Application development grid: - title: Packaging your software description: 'Build and package your application to run it anywhere: locally or diff --git a/content/manuals/build/bake/_index.md b/content/manuals/build/bake/_index.md index ee9e59453cd..088609244c5 100644 --- a/content/manuals/build/bake/_index.md +++ b/content/manuals/build/bake/_index.md @@ -10,9 +10,9 @@ Bake is a feature of Docker Buildx that lets you define your build configuration using a declarative file, as opposed to specifying a complex CLI expression. It also lets you run multiple builds concurrently with a single invocation. -A Bake file can be written in HCL, JSON, or YAML formats, where the YAML format -is an extension of a Docker Compose file. Here's an example Bake file in HCL -format: +A Bake file can be written in HCL or JSON format. Bake can also build directly +from a [Docker Compose file](./compose-file.md). Here's an example Bake file in +HCL format: ```hcl {title=docker-bake.hcl} group "default" { diff --git a/content/manuals/build/bake/compose-file.md b/content/manuals/build/bake/compose-file.md index e142133828c..7347fd32e41 100644 --- a/content/manuals/build/bake/compose-file.md +++ b/content/manuals/build/bake/compose-file.md @@ -94,7 +94,7 @@ $ docker buildx bake --print The compose format has some limitations compared to the HCL format: -- Specifying variables or global scope attributes is not yet supported +- Specifying variables or global scope attributes is not supported - `inherits` service field is not supported, but you can use [YAML anchors](/reference/compose-file/fragments.md) to reference other services, as demonstrated in the previous example with `&build-dev`. 
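+As a minimal sketch of that anchor workaround (service, stage, and file names
+are illustrative), the shared `build` block is defined once and merged into the
+other service:
+
+```yaml
+services:
+  webapp-dev:
+    build: &build-dev
+      context: .
+      dockerfile: Dockerfile
+  webapp-release:
+    build:
+      # Reuse the build configuration from webapp-dev via the anchor
+      <<: *build-dev
+      target: release
+```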
diff --git a/content/manuals/build/bake/contexts.md b/content/manuals/build/bake/contexts.md index 6157b805b50..490b20a469d 100644 --- a/content/manuals/build/bake/contexts.md +++ b/content/manuals/build/bake/contexts.md @@ -14,7 +14,7 @@ aliases: In addition to the main `context` key that defines the build context, each target can also define additional named contexts with a map defined with key `contexts`. These values map to the `--build-context` flag in the [build -command](/reference/cli/docker/buildx/build.md#build-context). +command](/reference/cli/docker/buildx/build/#build-context). Inside the Dockerfile these contexts can be used with the `FROM` instruction or `--from` flag. @@ -68,6 +68,7 @@ name with `target:` prefix. ```dockerfile {title=baseapp.Dockerfile} FROM scratch ``` + ```dockerfile {title=Dockerfile} # syntax=docker/dockerfile:1 FROM baseapp @@ -89,104 +90,3 @@ target "app" { In most cases you should just use a single multi-stage Dockerfile with multiple targets for similar behavior. This case is only recommended when you have multiple Dockerfiles that can't be easily merged into one. - -## Deduplicate context transfer - -> [!NOTE] -> -> As of Buildx version 0.17.0 and later, Bake automatically de-duplicates -> context transfer for targets that share the same context. In addition to -> Buildx version 0.17.0, the builder must be running BuildKit version 0.16.0 or -> later, and the Dockerfile syntax must be `docker/dockerfile:1.10` or later. -> -> If you meet these requirements, you don't need to manually de-duplicate -> context transfer as described in this section. -> -> - To check your Buildx version, run `docker buildx version`. -> - To check your BuildKit version, run `docker buildx inspect --bootstrap` and -> look for the `BuildKit version` field. -> - To check your Dockerfile syntax version, check the `syntax` -> [parser directive](/reference/dockerfile.md#syntax) in your Dockerfile. If -> it's not present, the default version whatever comes bundled with your -> current version of BuildKit. To set the version explicitly, add -> `#syntax=docker/dockerfile:1.10` at the top of your Dockerfile. - -When you build targets concurrently, using groups, build contexts are loaded -independently for each target. If the same context is used by multiple targets -in a group, that context is transferred once for each time it's used. This can -result in significant impact on build time, depending on your build -configuration. For example, say you have a Bake file that defines the following -group of targets: - -```hcl {title=docker-bake.hcl} -group "default" { - targets = ["target1", "target2"] -} - -target "target1" { - target = "target1" - context = "." -} - -target "target2" { - target = "target2" - context = "." -} -``` - -In this case, the context `.` is transferred twice when you build the default -group: once for `target1` and once for `target2`. - -If your context is small, and if you are using a local builder, duplicate -context transfers may not be a big deal. But if your build context is big, or -you have a large number of targets, or you're transferring the context over a -network to a remote builder, context transfer becomes a performance bottleneck. - -To avoid transferring the same context multiple times, you can define a named -context that only loads the context files, and have each target that needs -those files reference that named context. 
For example, the following Bake file -defines a named target `ctx`, which is used by both `target1` and `target2`: - -```hcl {title=docker-bake.hcl} -group "default" { - targets = ["target1", "target2"] -} - -target "ctx" { - context = "." - target = "ctx" -} - -target "target1" { - target = "target1" - contexts = { - ctx = "target:ctx" - } -} - -target "target2" { - target = "target2" - contexts = { - ctx = "target:ctx" - } -} -``` - -The named context `ctx` represents a Dockerfile stage, which copies the files -from its context (`.`). Other stages in the Dockerfile can now reference the -`ctx` named context and, for example, mount its files with `--mount=from=ctx`. - -```dockerfile {title=Dockerfile} -FROM scratch AS ctx -COPY --link . . - -FROM golang:alpine AS target1 -WORKDIR /work -RUN --mount=from=ctx \ - go build -o /out/client ./cmd/client \ - -FROM golang:alpine AS target2 -WORKDIR /work -RUN --mount=from=ctx \ - go build -o /out/server ./cmd/server -``` diff --git a/content/manuals/build/bake/funcs.md b/content/manuals/build/bake/funcs.md index 76a39b28251..e99236d63c7 100644 --- a/content/manuals/build/bake/funcs.md +++ b/content/manuals/build/bake/funcs.md @@ -13,8 +13,9 @@ configuration in more complex ways than just concatenation or interpolation. ## Standard library -Bake ships with built-in support for the [`go-cty` standard library functions](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib). -The following example shows the `add` function. +Bake ships with built-in support for the [standard library functions](/manuals/build/bake/stdlib.md). + +The following example shows the `add` function: ```hcl {title=docker-bake.hcl} variable "TAG" { @@ -108,8 +109,6 @@ $ docker buildx bake --print webapp You can make references to [variables](./variables) and standard library functions inside your functions. -You can't reference user-defined functions from other functions. - The following example uses a global variable (`REPO`) in a custom function. ```hcl {title=docker-bake.hcl} diff --git a/content/manuals/build/bake/introduction.md b/content/manuals/build/bake/introduction.md index fcc035a9a8f..e1387f2e6e0 100644 --- a/content/manuals/build/bake/introduction.md +++ b/content/manuals/build/bake/introduction.md @@ -51,10 +51,11 @@ building images in a consistent way, with the same configuration. ## The Bake file format -You can write Bake files in HCL, YAML (Docker Compose files), or JSON. In -general, HCL is the most expressive and flexible format, which is why you'll -see it used in most of the examples in this documentation, and in projects that -use Bake. +You can write Bake files in HCL or JSON. Bake can also read +[Docker Compose files](./compose-file.md) and translate each service to a build +target. HCL is the most expressive and flexible format, which is why you'll see +it used in most of the examples in this documentation, and in projects that use +Bake. The properties that can be set for a target closely resemble the CLI flags for `docker build`. For instance, consider the following `docker build` command: @@ -87,7 +88,7 @@ target "myapp" { > [!TIP] > > Want a better editing experience for Bake files in VS Code? -> Check out the [Docker VS Code Extension (Beta)](https://marketplace.visualstudio.com/items?itemName=docker.docker) for linting, code navigation, and vulnerability scanning. +> Check out the [Docker DX](https://marketplace.visualstudio.com/items?itemName=docker.docker) extension for linting, code navigation, and vulnerability scanning. 
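+Once a target like `myapp` above is saved in a `docker-bake.hcl` file at the
+root of your project, the equivalent build is a single Bake invocation:
+
+```console
+$ docker buildx bake myapp
+```
+
+Bake reads `docker-bake.hcl` from the current directory by default; pass `-f`
+to point it at a different file.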
## Next steps diff --git a/content/manuals/build/bake/overrides.md b/content/manuals/build/bake/overrides.md index ff051976a69..51ef54a9a76 100644 --- a/content/manuals/build/bake/overrides.md +++ b/content/manuals/build/bake/overrides.md @@ -177,7 +177,7 @@ $ docker buildx bake -f docker-bake.hcl -f overrides.hcl --print ## Command line You can also override target configurations from the command line with the -[`--set` flag](/reference/cli/docker/buildx/bake.md#set): +[`--set` flag](/reference/cli/docker/buildx/bake/#set): ```hcl # docker-bake.hcl @@ -212,6 +212,11 @@ $ docker buildx bake --set app.args.mybuildarg=bar --set app.platform=linux/arm6 } ``` +> [!NOTE] +> +> `--set` is a repeatable flag. For array fields such as `tags`, repeat `--set` to provide multiple values or use the `+=` operator to append without replacing. +> Array literal syntax like `--set target.tags=[a,b]` is not supported. + Pattern matching syntax defined in [https://golang.org/pkg/path/#Match](https://golang.org/pkg/path/#Match) is also supported: diff --git a/content/manuals/build/bake/remote-definition.md b/content/manuals/build/bake/remote-definition.md index 15b1769f4ac..895850817f1 100644 --- a/content/manuals/build/bake/remote-definition.md +++ b/content/manuals/build/bake/remote-definition.md @@ -176,7 +176,7 @@ and use the `cwd://` prefix for the metadata Bake file: ```yml - name: Build - uses: docker/bake-action@v6 + uses: docker/bake-action@{{% param "bake_action_version" %}} with: files: | ./docker-bake.hcl diff --git a/content/manuals/build/bake/targets.md b/content/manuals/build/bake/targets.md index 183eb939e92..e7fc93ae335 100644 --- a/content/manuals/build/bake/targets.md +++ b/content/manuals/build/bake/targets.md @@ -54,6 +54,18 @@ $ docker buildx bake The properties you can set for a target closely resemble the CLI flags for `docker build`, with a few additional properties that are specific to Bake. +The `dockerfile` property specifies the path to the Dockerfile for a target. +If you also set a `context`, the `dockerfile` path resolves relative to that +context. + +```hcl {title=docker-bake.hcl} +target "default" { + context = "app" + # resolves to app/src/www/Dockerfile + dockerfile = "src/www/Dockerfile" +} +``` + For all the properties you can set for a target, see the [Bake reference](/build/bake/reference#target). ## Grouping targets @@ -96,6 +108,47 @@ command. $ docker buildx bake all ``` +## Pattern matching for targets and groups + +Bake supports shell-style wildcard patterns when specifying target or grouped targets. +This makes it easier to build multiple targets without listing each one explicitly. + +Supported patterns: + +- `*` matches any sequence of characters +- `?` matches any single character +- `[abc]` matches any character in brackets + +> [!NOTE] +> +> Always wrap wildcard patterns in quotes. Without quotes, your shell will expand the +> wildcard to match files in the current directory, causing errors. + +Examples: + +```console +# Match all targets starting with 'foo-' +$ docker buildx bake "foo-*" + +# Match all targets +$ docker buildx bake "*" + +# Matches: foo-baz, foo-caz, foo-daz, etc. 
+$ docker buildx bake "foo-?az" + +# Matches: foo-bar, boo-bar +$ docker buildx bake "[fb]oo-bar" + +# Matches: mtx-a-b-d, mtx-a-b-e, mtx-a-b-f +$ docker buildx bake "mtx-a-b-*" +``` + +You can also combine multiple patterns: + +```console +$ docker buildx bake "foo*" "tests" +``` + ## Additional resources Refer to the following pages to learn more about Bake's features: diff --git a/content/manuals/build/bake/variables.md b/content/manuals/build/bake/variables.md index e4861a5a723..120c18b3e84 100644 --- a/content/manuals/build/bake/variables.md +++ b/content/manuals/build/bake/variables.md @@ -2,7 +2,7 @@ title: Variables in Bake linkTitle: Variables weight: 40 -description: +description: keywords: build, buildx, bake, buildkit, hcl, variables --- @@ -93,7 +93,7 @@ range, or other condition, you can define custom validation rules using the `validation` block. In the following example, validation is used to enforce a numeric constraint on -a variable value; the `PORT` variable must be 1024 or higher. +a variable value; the `PORT` variable must be 1024 or greater. ```hcl {title=docker-bake.hcl} # Define a variable `PORT` with a default value and a validation rule @@ -103,7 +103,7 @@ variable "PORT" { # Validation block to ensure `PORT` is a valid number within the acceptable range validation { condition = PORT >= 1024 # Ensure `PORT` is at least 1024 - error_message = "The variable 'PORT' must be 1024 or higher." # Error message for invalid values + error_message = "The variable 'PORT' must be 1024 or greater." # Error message for invalid values } } ``` diff --git a/content/manuals/build/builders/_index.md b/content/manuals/build/builders/_index.md index ccb8d9a6202..bc61ea2aaf1 100644 --- a/content/manuals/build/builders/_index.md +++ b/content/manuals/build/builders/_index.md @@ -78,27 +78,21 @@ older versions of the Docker CLI. The `docker buildx build` command, on the other hand, checks whether you've set a different builder as the default builder before it sends your build to BuildKit. -To use the `docker build` command with a non-default builder, you must either: +To use the `docker build` command with a non-default builder, you must either +specify the builder explicitly: -- Specify the builder explicitly, using the `--builder` flag or the `BUILDX_BUILDER` environment variable: +- Using the `--builder` flag: ```console - $ BUILDX_BUILDER=my_builder docker build . $ docker build --builder my_builder . ``` -- Configure Buildx as the default client by running the following command: +- Or the `BUILDX_BUILDER` environment variable: ```console - $ docker buildx install + $ BUILDX_BUILDER=my_builder docker build . ``` - This updates your [Docker CLI configuration file](/reference/cli/docker/_index.md#configuration-files) - to ensure all of your build-related commands are routed via Buildx. - - > [!TIP] - > To undo this change, run `docker buildx uninstall`. - In general, we recommend that you use the `docker buildx build` command when diff --git a/content/manuals/build/builders/drivers/_index.md b/content/manuals/build/builders/drivers/_index.md index de4f772c641..a90261ca451 100644 --- a/content/manuals/build/builders/drivers/_index.md +++ b/content/manuals/build/builders/drivers/_index.md @@ -28,7 +28,7 @@ The following table outlines some differences between drivers. 
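+For example, after selecting a builder with `docker buildx use`, subsequent
+`docker buildx build` invocations are sent to that builder (this assumes a
+builder named `my_builder` already exists):
+
+```console
+$ docker buildx use my_builder
+$ docker buildx build --tag myimage:latest .
+```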
| Feature | `docker` | `docker-container` | `kubernetes` | `remote` | | :--------------------------- | :---------: | :----------------: | :----------: | :----------------: | | **Automatically load image** | ✅ | | | | -| **Cache export** | ✓\* | ✅ | ✅ | ✅ | +| **Cache export** | ✅\* | ✅ | ✅ | ✅ | | **Tarball output** | | ✅ | ✅ | ✅ | | **Multi-arch images** | | ✅ | ✅ | ✅ | | **BuildKit configuration** | | ✅ | ✅ | Managed externally | diff --git a/content/manuals/build/builders/drivers/docker-container.md b/content/manuals/build/builders/drivers/docker-container.md index 025db83eba5..28ceb4508ec 100644 --- a/content/manuals/build/builders/drivers/docker-container.md +++ b/content/manuals/build/builders/drivers/docker-container.md @@ -49,6 +49,7 @@ pass to `--driver-opt`: | `cgroup-parent` | String | `/docker/buildx` | Sets the cgroup parent of the container if Docker is using the "cgroupfs" driver. | | `restart-policy` | String | `unless-stopped` | Sets the container's [restart policy](/manuals/engine/containers/start-containers-automatically.md#use-a-restart-policy). | | `env.` | String | | Sets the environment variable `key` to the specified `value` in the container. | +| `provenance-add-gha` | Boolean | `true` | Automatically writes GitHub Actions context into the builder for provenance. | Before you configure the resource limits for the container, read about [configuring runtime resource constraints for containers](/engine/containers/resource_constraints/). @@ -133,14 +134,14 @@ $ docker buildx build \ You can customize the network that the builder container uses. This is useful if you need to use a specific network for your builds. -For example, let's [create a network](/reference/cli/docker/network/create.md) +For example, let's [create a network](/reference/cli/docker/network/create/) named `foonet`: ```console $ docker network create foonet ``` -Now create a [`docker-container` builder](/reference/cli/docker/buildx/create.md) +Now create a [`docker-container` builder](/reference/cli/docker/buildx/create/) that will use this network: ```console @@ -150,13 +151,13 @@ $ docker buildx create --use \ --driver-opt "network=foonet" ``` -Boot and [inspect `mybuilder`](/reference/cli/docker/buildx/inspect.md): +Boot and [inspect `mybuilder`](/reference/cli/docker/buildx/inspect/): ```console $ docker buildx inspect --bootstrap ``` -[Inspect the builder container](/reference/cli/docker/inspect.md) +[Inspect the builder container](/reference/cli/docker/inspect/) and see what network is being used: ```console @@ -167,4 +168,4 @@ map[foonet:0xc00018c0c0] ## Further reading For more information on the Docker container driver, see the -[buildx reference](/reference/cli/docker/buildx/create.md#driver). +[buildx reference](/reference/cli/docker/buildx/create/#driver). diff --git a/content/manuals/build/builders/drivers/docker.md b/content/manuals/build/builders/drivers/docker.md index 91fe317c22b..a01fb316137 100644 --- a/content/manuals/build/builders/drivers/docker.md +++ b/content/manuals/build/builders/drivers/docker.md @@ -37,4 +37,4 @@ If you need additional configuration and flexibility, consider using the ## Further reading For more information on the Docker driver, see the -[buildx reference](/reference/cli/docker/buildx/create.md#driver). +[buildx reference](/reference/cli/docker/buildx/create/#driver). 
diff --git a/content/manuals/build/builders/drivers/kubernetes.md b/content/manuals/build/builders/drivers/kubernetes.md index 115bb73cfd4..946e014fde8 100644 --- a/content/manuals/build/builders/drivers/kubernetes.md +++ b/content/manuals/build/builders/drivers/kubernetes.md @@ -30,29 +30,31 @@ $ docker buildx create \ The following table describes the available driver-specific options that you can pass to `--driver-opt`: -| Parameter | Type | Default | Description | -| ---------------------------- | ------------ | --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | -| `image` | String | | Sets the image to use for running BuildKit. | -| `namespace` | String | Namespace in current Kubernetes context | Sets the Kubernetes namespace. | -| `default-load` | Boolean | `false` | Automatically load images to the Docker Engine image store. | -| `replicas` | Integer | 1 | Sets the number of Pod replicas to create. See [scaling BuildKit][1] | -| `requests.cpu` | CPU units | | Sets the request CPU value specified in units of Kubernetes CPU. For example `requests.cpu=100m` or `requests.cpu=2` | -| `requests.memory` | Memory size | | Sets the request memory value specified in bytes or with a valid suffix. For example `requests.memory=500Mi` or `requests.memory=4G` | -| `requests.ephemeral-storage` | Storage size | | Sets the request ephemeral-storage value specified in bytes or with a valid suffix. For example `requests.ephemeral-storage=2Gi` | -| `limits.cpu` | CPU units | | Sets the limit CPU value specified in units of Kubernetes CPU. For example `requests.cpu=100m` or `requests.cpu=2` | -| `limits.memory` | Memory size | | Sets the limit memory value specified in bytes or with a valid suffix. For example `requests.memory=500Mi` or `requests.memory=4G` | -| `limits.ephemeral-storage` | Storage size | | Sets the limit ephemeral-storage value specified in bytes or with a valid suffix. For example `requests.ephemeral-storage=100M` | -| `nodeselector` | CSV string | | Sets the pod's `nodeSelector` label(s). See [node assignment][2]. | -| `annotations` | CSV string | | Sets additional annotations on the deployments and pods. | -| `labels` | CSV string | | Sets additional labels on the deployments and pods. | -| `tolerations` | CSV string | | Configures the pod's taint toleration. See [node assignment][2]. | -| `serviceaccount` | String | | Sets the pod's `serviceAccountName`. | -| `schedulername` | String | | Sets the scheduler responsible for scheduling the pod. | -| `timeout` | Time | `120s` | Set the timeout limit that determines how long Buildx will wait for pods to be provisioned before a build. | -| `rootless` | Boolean | `false` | Run the container as a non-root user. See [rootless mode][3]. | -| `loadbalance` | String | `sticky` | Load-balancing strategy (`sticky` or `random`). If set to `sticky`, the pod is chosen using the hash of the context path. | -| `qemu.install` | Boolean | `false` | Install QEMU emulation for multi platforms support. See [QEMU][4]. | -| `qemu.image` | String | `tonistiigi/binfmt:latest` | Sets the QEMU emulation image. See [QEMU][4]. 
| +| Parameter | Type | Default | Description | +| ------------------------------------------ | ------------ | --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `image` | String | | Sets the image to use for running BuildKit. | +| `namespace` | String | Namespace in current Kubernetes context | Sets the Kubernetes namespace. | +| `default-load` | Boolean | `false` | Automatically load images to the Docker Engine image store. | +| `replicas` | Integer | 1 | Sets the number of Pod replicas to create. See [scaling BuildKit][1] | +| `requests.cpu` | CPU units | | Sets the request CPU value specified in units of Kubernetes CPU. For example `requests.cpu=100m` or `requests.cpu=2` | +| `requests.memory` | Memory size | | Sets the request memory value specified in bytes or with a valid suffix. For example `requests.memory=500Mi` or `requests.memory=4G` | +| `requests.ephemeral-storage` | Storage size | | Sets the request ephemeral-storage value specified in bytes or with a valid suffix. For example `requests.ephemeral-storage=2Gi` | +| `persistent-volume-claim.requests.storage` | Storage size | | Sets the requested size for a persistent volume claim. When set, Buildx creates a `StatefulSet` and stores the BuildKit build cache in the claim. For example `persistent-volume-claim.requests.storage=20Gi` | +| `limits.cpu` | CPU units | | Sets the limit CPU value specified in units of Kubernetes CPU. For example `requests.cpu=100m` or `requests.cpu=2` | +| `limits.memory` | Memory size | | Sets the limit memory value specified in bytes or with a valid suffix. For example `requests.memory=500Mi` or `requests.memory=4G` | +| `limits.ephemeral-storage` | Storage size | | Sets the limit ephemeral-storage value specified in bytes or with a valid suffix. For example `requests.ephemeral-storage=100M` | +| `buildkit-root-volume-memory` | Memory size | Using regular file system | Mounts `/var/lib/buildkit` on an `emptyDir` memory-backed volume, with `SizeLimit` as the value. For example, `buildkit-root-folder-memory=6G` | +| `nodeselector` | CSV string | | Sets the pod's `nodeSelector` label(s). See [node assignment][2]. | +| `annotations` | CSV string | | Sets additional annotations on the `Deployment` or `StatefulSet` and pods. | +| `labels` | CSV string | | Sets additional labels on the `Deployment` or `StatefulSet` and pods. | +| `tolerations` | CSV string | | Configures the pod's taint toleration. See [node assignment][2]. | +| `serviceaccount` | String | | Sets the pod's `serviceAccountName`. | +| `schedulername` | String | | Sets the scheduler responsible for scheduling the pod. | +| `timeout` | Time | `120s` | Set the timeout limit that determines how long Buildx will wait for pods to be provisioned before a build. | +| `rootless` | Boolean | `false` | Run the container as a non-root user. See [rootless mode][3]. | +| `loadbalance` | String | `sticky` | Load-balancing strategy (`sticky` or `random`). If set to `sticky`, the pod is chosen using the hash of the context path. | +| `qemu.install` | Boolean | `false` | Install QEMU emulation for multi platforms support. See [QEMU][4]. | +| `qemu.image` | String | `tonistiigi/binfmt:latest` | Sets the QEMU emulation image. See [QEMU][4]. 
| [1]: #scaling-buildkit [2]: #node-assignment @@ -74,8 +76,7 @@ is configurable using the following driver options: - `requests.cpu`, `requests.memory`, `requests.ephemeral-storage`, `limits.cpu`, `limits.memory`, `limits.ephemeral-storage` These options allow requesting and limiting the resources available to each - BuildKit pod according to the official Kubernetes documentation - [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + BuildKit pod [according to the official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). For example, to create 4 replica BuildKit pods: @@ -109,7 +110,28 @@ replicas. `sticky` (the default) attempts to connect the same build performed multiple times to the same node each time, ensuring better use of local cache. For more information on scalability, see the options for -[`docker buildx create`](/reference/cli/docker/buildx/create.md#driver-opt). +[`docker buildx create`](/reference/cli/docker/buildx/create/#driver-opt). + +## Persistent storage + +Set the `persistent-volume-claim.requests.storage` driver option to store the +BuildKit build cache in a persistent volume claim instead of the pod +filesystem. +When you set this option, Buildx creates a `StatefulSet` instead of a +`Deployment`. + +If you also set `replicas`, each replica gets its own persistent volume claim. +This keeps the build cache local to each pod across restarts. + +For example, to create a builder with 20 GiB of persistent storage per replica: + +```console +$ docker buildx create \ + --bootstrap \ + --name=kube \ + --driver=kubernetes \ + --driver-opt=namespace=buildkit,replicas=4,persistent-volume-claim.requests.storage=20Gi +``` ## Node assignment @@ -118,7 +140,8 @@ using the `nodeSelector` and `tolerations` driver options. You can also set the `schedulername` option if you want to use a custom scheduler altogether. You can use the `annotations` and `labels` driver options to apply additional -metadata to the deployments and pods that's hosting your builders. +metadata to the `Deployment` or `StatefulSet` and the pods hosting your +builders. The value of the `nodeSelector` parameter is a comma-separated string of key-value pairs, where the key is the node label and the value is the label @@ -246,8 +269,8 @@ that you want to support. ## Rootless mode The Kubernetes driver supports rootless mode. For more information on how -rootless mode works, and its requirements, see -[here](https://github.com/moby/buildkit/blob/master/docs/rootless.md). +rootless mode works, and its requirements, refer to the +[Rootless Buildkit documentation](https://github.com/moby/buildkit/blob/master/docs/rootless.md). To turn it on in your cluster, you can use the `rootless=true` driver option: @@ -349,4 +372,4 @@ That's it: you've now built an image from a Kubernetes pod, using Buildx. ## Further reading For more information on the Kubernetes driver, see the -[buildx reference](/reference/cli/docker/buildx/create.md#driver). +[buildx reference](/reference/cli/docker/buildx/create/#driver). diff --git a/content/manuals/build/builders/drivers/remote.md b/content/manuals/build/builders/drivers/remote.md index f002d49ea36..d059cdfdcc7 100644 --- a/content/manuals/build/builders/drivers/remote.md +++ b/content/manuals/build/builders/drivers/remote.md @@ -48,9 +48,9 @@ Unix socket, and have Buildx connect through it. 
$ sudo ./buildkitd --group $(id -gn) --addr unix://$HOME/buildkitd.sock ``` - Alternatively, [see here](https://github.com/moby/buildkit/blob/master/docs/rootless.md) - for running buildkitd in rootless mode or [here](https://github.com/moby/buildkit/tree/master/examples/systemd) - for examples of running it as a systemd service. + Alternatively, refer to the [Rootless Buildkit documentation](https://github.com/moby/buildkit/blob/master/docs/rootless.md) + for running buildkitd in rootless mode, or [the BuildKit systemd examples](https://github.com/moby/buildkit/tree/master/examples/systemd) + for running it as a systemd service. 2. Check that you have a Unix socket that you can connect to. @@ -159,13 +159,13 @@ BuildKit manually. Additionally, when executing builds from inside Kubernetes pods, the Buildx builder will need to be recreated from within each pod or copied between them. -1. Create a Kubernetes deployment of `buildkitd`, as per the instructions - [here](https://github.com/moby/buildkit/tree/master/examples/kubernetes). +1. Create a Kubernetes deployment of `buildkitd` by following the instructions + [in the BuildKit documentation](https://github.com/moby/buildkit/tree/master/examples/kubernetes). - Following the guide, create certificates for the BuildKit daemon and client - using [create-certs.sh](https://github.com/moby/buildkit/blob/master/examples/kubernetes/create-certs.sh), - and create a deployment of BuildKit pods with a service that connects to - them. + Create certificates for the BuildKit daemon and client using the + [create-certs.sh](https://github.com/moby/buildkit/blob/master/examples/kubernetes/create-certs.sh), + script and create a deployment of BuildKit pods with a service that connects + to them. 2. Assuming that the service is called `buildkitd`, create a remote builder in Buildx, ensuring that the listed certificate files are present: diff --git a/content/manuals/build/builders/manage.md b/content/manuals/build/builders/manage.md index 5039fa18e5e..e3c5df84694 100644 --- a/content/manuals/build/builders/manage.md +++ b/content/manuals/build/builders/manage.md @@ -16,7 +16,7 @@ that use other drivers, such as the which runs the BuildKit daemon in a container. Use the -[`docker buildx create`](/reference/cli/docker/buildx/create.md) +[`docker buildx create`](/reference/cli/docker/buildx/create/) command to create a builder. ```console @@ -100,7 +100,7 @@ Total: 2.01GB ## Remove a builder Use the -[`docker buildx remove`](/reference/cli/docker/buildx/create.md) +[`docker buildx remove`](/reference/cli/docker/buildx/create/) command to remove a builder. ```console diff --git a/content/manuals/build/building/base-images.md b/content/manuals/build/building/base-images.md index 2e11b0ba540..1a13a039bb2 100644 --- a/content/manuals/build/building/base-images.md +++ b/content/manuals/build/building/base-images.md @@ -1,6 +1,6 @@ --- title: Base images -weight: 70 +weight: 80 description: Learn about base images and how they're created keywords: images, base image, examples aliases: @@ -102,17 +102,17 @@ which you can also use to build Ubuntu images. For example, to create an Ubuntu base image: ```dockerfile -$ sudo debootstrap focal focal > /dev/null -$ sudo tar -C focal -c . | docker import - focal +$ sudo debootstrap noble noble > /dev/null +$ sudo tar -C noble -c . 
| docker import - noble sha256:81ec9a55a92a5618161f68ae691d092bf14d700129093158297b3d01593f4ee3 -$ docker run focal cat /etc/lsb-release +$ docker run noble cat /etc/lsb-release DISTRIB_ID=Ubuntu -DISTRIB_RELEASE=20.04 -DISTRIB_CODENAME=focal -DISTRIB_DESCRIPTION="Ubuntu 20.04 LTS" +DISTRIB_RELEASE=24.04 +DISTRIB_CODENAME=noble +DISTRIB_DESCRIPTION="Ubuntu 24.04.2 LTS" ``` There are more example scripts for creating base images in diff --git a/content/manuals/build/building/best-practices.md b/content/manuals/build/building/best-practices.md index eb308bcf862..4e4bba5b5c0 100644 --- a/content/manuals/build/building/best-practices.md +++ b/content/manuals/build/building/best-practices.md @@ -1,7 +1,7 @@ --- title: Building best practices linkTitle: Best practices -weight: 60 +weight: 70 description: Hints, tips and guidelines for writing clean, reliable Dockerfiles keywords: base images, dockerfile, best practices, hub, official image tags: [Best practices] @@ -45,17 +45,17 @@ The first step towards achieving a secure image is to choose the right base image. When choosing an image, ensure it's built from a trusted source and keep it small. -- [Docker Official Images](https://hub.docker.com/search?image_filter=official) +- [Docker Official Images](https://hub.docker.com/search?badges=official) are a curated collection that have clear documentation, promote best practices, and are regularly updated. They provide a trusted starting point for many applications. -- [Verified Publisher](https://hub.docker.com/search?image_filter=store) images +- [Verified Publisher](https://hub.docker.com/search?badges=verified_publisher) images are high-quality images published and maintained by the organizations partnering with Docker, with Docker verifying the authenticity of the content in their repositories. -- [Docker-Sponsored Open Source](https://hub.docker.com/search?image_filter=open_source) +- [Docker-Sponsored Open Source](https://hub.docker.com/search?badges=open_source) are published and maintained by open source projects sponsored by Docker through an [open source program](../../docker-hub/image-library/trusted-content.md#docker-sponsored-open-source-software-images). @@ -77,23 +77,17 @@ dependencies can considerably lower the attack surface. ## Rebuild your images often -Docker images are immutable. Building an image is taking a snapshot of that -image at that moment. That includes any base images, libraries, or other -software you use in your build. To keep your images up-to-date and secure, make -sure to rebuild your image often, with updated dependencies. +Docker images are immutable. Building an image is taking a snapshot of +that image at that moment. That includes any base images, libraries, or +other software you use in your build. To keep your images up-to-date and +secure, rebuild your images regularly with updated dependencies. -To ensure that you're getting the latest versions of dependencies in your build, -you can use the `--no-cache` option to avoid cache hits. +### Use --pull to get fresh base images -```console -$ docker build --no-cache -t my-image:my-tag . -``` - -The following Dockerfile uses the `24.04` tag of the `ubuntu` image. Over time, -that tag may resolve to a different underlying version of the `ubuntu` image, -as the publisher rebuilds the image with new security patches and updated -libraries. Using the `--no-cache`, you can avoid cache hits and ensure a fresh -download of base images and dependencies. 
+The following Dockerfile uses the `24.04` tag of the `ubuntu` image. +Over time, that tag may resolve to a different underlying version of the +`ubuntu` image, as the publisher rebuilds the image with new security +patches and updated libraries. ```dockerfile # syntax=docker/dockerfile:1 @@ -101,6 +95,33 @@ FROM ubuntu:24.04 RUN apt-get -y update && apt-get install -y --no-install-recommends python3 ``` +To get the latest version of the base image, use the `--pull` flag: + +```console +$ docker build --pull -t my-image:my-tag . +``` + +The `--pull` flag forces Docker to check for and download a newer +version of the base image, even if you have a version cached locally. + +### Use --no-cache for clean builds + +The `--no-cache` flag disables the build cache, forcing Docker to +rebuild all layers from scratch: + +```console +$ docker build --no-cache -t my-image:my-tag . +``` + +This gets the latest available versions of dependencies from package +managers like `apt-get` or `npm`. However, `--no-cache` doesn't pull a +fresh base image - it only prevents reusing cached layers. For a +completely fresh build with the latest base image, combine both flags: + +```console +$ docker build --pull --no-cache -t my-image:my-tag . +``` + Also consider [pinning base image versions](#pin-base-image-versions). ## Exclude with .dockerignore @@ -192,17 +213,17 @@ image. This is useful because it lets publishers update tags to point to newer versions of an image. And as an image consumer, it means you automatically get the new version when you re-build your image. -For example, if you specify `FROM alpine:3.19` in your Dockerfile, `3.19` -resolves to the latest patch version for `3.19`. +For example, if you specify `FROM alpine:3.21` in your Dockerfile, `3.21` +resolves to the latest patch version for `3.21`. ```dockerfile # syntax=docker/dockerfile:1 -FROM alpine:3.19 +FROM alpine:3.21 ``` -At one point in time, the `3.19` tag might point to version 3.19.1 of the +At one point in time, the `3.21` tag might point to version 3.21.1 of the image. If you rebuild the image 3 months later, the same tag might point to a -different version, such as 3.19.4. This publishing workflow is best practice, +different version, such as 3.21.4. This publishing workflow is best practice, and most publishers use this tagging strategy, but it isn't enforced. The downside with this is that you're not guaranteed to get the same for every @@ -213,16 +234,16 @@ To fully secure your supply chain integrity, you can pin the image version to a specific digest. By pinning your images to a digest, you're guaranteed to always use the same image version, even if a publisher replaces the tag with a new image. For example, the following Dockerfile pins the Alpine image to the -same tag as earlier, `3.19`, but this time with a digest reference as well. +same tag as earlier, `3.21`, but this time with a digest reference as well. ```dockerfile # syntax=docker/dockerfile:1 -FROM alpine:3.19@sha256:13b7e62e8df80264dbb747995705a986aa530415763a6c58f84a3ca8af9a5bcd +FROM alpine:3.21@sha256:a8560b36e8b8210634f77d9f7f9efd7ffa463e380b75e2e74aff4511df3ef88c ``` -With this Dockerfile, even if the publisher updates the `3.19` tag, your builds +With this Dockerfile, even if the publisher updates the `3.21` tag, your builds would still use the pinned image version: -`13b7e62e8df80264dbb747995705a986aa530415763a6c58f84a3ca8af9a5bcd`. +`a8560b36e8b8210634f77d9f7f9efd7ffa463e380b75e2e74aff4511df3ef88c`. 
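+One way to look up the digest for a tag is `docker buildx imagetools inspect`,
+which prints the manifest digest you can copy into your `FROM` line (output
+abbreviated):
+
+```console
+$ docker buildx imagetools inspect alpine:3.21
+Name:      docker.io/library/alpine:3.21
+Digest:    sha256:a8560b36e8b8210634f77d9f7f9efd7ffa463e380b75e2e74aff4511df3ef88c
+...
+```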
While this helps you avoid unexpected changes, it's also more tedious to have to look up and include the image digest for base image versions manually each @@ -259,14 +280,14 @@ to create an efficient and maintainable Dockerfile. > [!TIP] > -> Want a better editing experience for Dockerfiles in VS Code? -> Check out the [Docker VS Code Extension (Beta)](https://marketplace.visualstudio.com/items?itemName=docker.docker) for linting, code navigation, and vulnerability scanning. +> To improve linting, code navigation, and vulnerability scanning of your Dockerfiles in Visual Studio Code +> see the [Docker DX](https://marketplace.visualstudio.com/items?itemName=docker.docker) extension. ### FROM Whenever possible, use current official images as the basis for your images. Docker recommends the [Alpine image](https://hub.docker.com/_/alpine/) as it -is tightly controlled and small in size (currently under 6 MB), while still +is tightly controlled and small in size (under 6 MB), while still being a full Linux distribution. For more information about the `FROM` instruction, see [Dockerfile reference for the FROM instruction](/reference/dockerfile.md#from). @@ -442,7 +463,7 @@ reduces the image size, since the apt cache isn't stored in a layer. Since the `RUN` statement starts with `apt-get update`, the package cache is always refreshed prior to `apt-get install`. -Official Debian and Ubuntu images [automatically run `apt-get clean`](https://github.com/moby/moby/blob/03e2923e42446dbb830c654d0eec323a0b4ef02a/contrib/mkimage/debootstrap#L82-L105), so explicit invocation is not required. +Official Debian and Ubuntu images [automatically run `apt-get clean`](https://github.com/debuerreotype/debuerreotype/blob/c9542ab785e72696eb2908a6dbc9220abbabef39/scripts/debuerreotype-minimizing-config#L87-L109), so explicit invocation is not required. #### Using pipes @@ -487,7 +508,7 @@ service, such as Apache and Rails, you would run something like `CMD for any service-based image. In most other cases, `CMD` should be given an interactive shell, such as bash, -python and perl. For example, `CMD ["perl", "-de0"]`, `CMD ["python"]`, or `CMD +Python and perl. For example, `CMD ["perl", "-de0"]`, `CMD ["python"]`, or `CMD ["php", "-a"]`. Using this form means that when you execute something like `docker run -it python`, you’ll get dropped into a usable shell, ready to go. `CMD` should rarely be used in the manner of `CMD ["param", "param"]` in @@ -556,7 +577,7 @@ $ docker run --rm test sh -c 'echo $ADMIN_USER' mark ``` -To prevent this, and really unset the environment variable, use a `RUN` command +To prevent this and unset the environment variable, use a `RUN` command with shell commands, to set, use, and unset the variable all in a single layer. You can separate your commands with `;` or `&&`. If you use the second method, and one of the commands fails, the `docker build` also fails. This is usually a @@ -610,7 +631,7 @@ as part of your build. `ADD` is better than manually adding files using something like `wget` and `tar`, because it ensures a more precise build cache. `ADD` also has built-in support for checksum validation of the remote resources, and a protocol for parsing branches, tags, and subdirectories from -[Git URLs](/reference/cli/docker/buildx/build.md#git-repositories). +[Git URLs](/reference/cli/docker/buildx/build/). The following example uses `ADD` to download a .NET installer. 
Combined with multi-stage builds, only the .NET runtime remains in the final stage, no @@ -639,10 +660,10 @@ RUN ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet ``` For more information about `ADD` or `COPY`, see the following: + - [Dockerfile reference for the ADD instruction](/reference/dockerfile.md#add) - [Dockerfile reference for the COPY instruction](/reference/dockerfile.md#copy) - ### ENTRYPOINT The best use for `ENTRYPOINT` is to set the image's main command, allowing that @@ -679,7 +700,7 @@ For example, the [Postgres Official Image](https://hub.docker.com/_/postgres/) uses the following script as its `ENTRYPOINT`: ```bash -#!/bin/bash +#!/bin/sh set -e if [ "$1" = 'postgres' ]; then @@ -695,8 +716,7 @@ fi exec "$@" ``` - -This script uses [the `exec` Bash command](https://wiki.bash-hackers.org/commands/builtin/exec) so that the final running application becomes the container's PID 1. This allows the application to receive any Unix signals sent to the container. For more information, see the [`ENTRYPOINT` reference](/reference/dockerfile.md#entrypoint). +This script uses [the `exec` builtin](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#exec) so that the final running application becomes the container's PID 1. This allows the application to receive any Unix signals sent to the container. For more information, see the [`ENTRYPOINT` reference](/reference/dockerfile.md#entrypoint). In the following example, a helper script is copied into the container and run via `ENTRYPOINT` on container start: @@ -763,7 +783,7 @@ RUN groupadd -r postgres && useradd --no-log-init -r -g postgres postgres > with a significantly large UID inside a Docker container can lead to disk > exhaustion because `/var/log/faillog` in the container layer is filled with > NULL (\0) characters. A workaround is to pass the `--no-log-init` flag to -> useradd. The Debian/Ubuntu `adduser` wrapper does not support this flag. +> `useradd`. The Debian/Ubuntu `adduser` wrapper does not support this flag. Avoid installing or using `sudo` as it has unpredictable TTY and signal-forwarding behavior that can cause problems. If you absolutely need @@ -778,11 +798,11 @@ For more information about `USER`, see [Dockerfile reference for the USER instru ### WORKDIR For clarity and reliability, you should always use absolute paths for your -`WORKDIR`. Also, you should use `WORKDIR` instead of proliferating instructions +`WORKDIR`. Also, you should use `WORKDIR` instead of proliferating instructions like `RUN cd … && do-something`, which are hard to read, troubleshoot, and maintain. -For more information about `WORKDIR`, see [Dockerfile reference for the WORKDIR instruction](/reference/dockerfile.md#workdir). +For more information about `WORKDIR`, see [Dockerfile reference for the `WORKDIR` instruction](/reference/dockerfile.md#workdir). ### ONBUILD @@ -802,7 +822,7 @@ Dockerfile, as you can see in [Ruby’s `ONBUILD` variants](https://github.com/d Images built with `ONBUILD` should get a separate tag. For example, `ruby:1.9-onbuild` or `ruby:2.0-onbuild`. -Be careful when putting `ADD` or `COPY` in `ONBUILD`. The onbuild image +Be careful when putting `ADD` or `COPY` in `ONBUILD`. The image fails catastrophically if the new build's context is missing the resource being added. Adding a separate tag, as recommended above, helps mitigate this by allowing the Dockerfile author to make a choice. 
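As an illustrative sketch only (the image name, tag, and `requirements.txt` file are hypothetical, not taken from the examples above), a base image that defers copying application files to the downstream build might be built under an explicit `-onbuild` tag from an inline Dockerfile:

```console
$ docker build -t app-base:3.0-onbuild - <<'EOF'
# Hypothetical base image; the -onbuild tag suffix follows the recommendation above.
FROM python:3.12-slim
# These instructions run only when a downstream image is built FROM app-base:3.0-onbuild,
# so it is the downstream build context that must contain requirements.txt.
ONBUILD COPY requirements.txt /app/
ONBUILD RUN pip install --no-cache-dir -r /app/requirements.txt
EOF
```

Because the `ONBUILD COPY` only fires in the downstream build, a missing `requirements.txt` fails the consumer's build rather than this one, which is why the distinct tag matters.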
diff --git a/content/manuals/build/building/cdi.md b/content/manuals/build/building/cdi.md new file mode 100644 index 00000000000..ce459f15e1b --- /dev/null +++ b/content/manuals/build/building/cdi.md @@ -0,0 +1,540 @@ +--- +title: Container Device Interface (CDI) +weight: 60 +description: Using CDI to access GPUs and other devices in your builds +keywords: build, buildkit, buildx, guide, tutorial, cdi, device, gpu, nvidia, cuda, amd, rocm +--- + + + +The [Container Device Interface (CDI)](https://github.com/cncf-tags/container-device-interface/blob/main/SPEC.md) +is a specification designed to standardize how devices (like GPUs, FPGAs, and +other hardware accelerators) are exposed to and used by containers. The aim is +to provide a more consistent and secure mechanism for using hardware devices in +containerized environments, addressing the challenges associated with +device-specific setups and configurations. + +In addition to enabling the container to interact with the device node, CDI also +lets you specify additional configuration for the device, such as environment +variables, host mounts (such as shared objects), and executable hooks. + +## Getting started + +To get started with CDI, you need to have a compatible environment set up. This +includes having Docker v27+ installed with [CDI configured](/reference/cli/dockerd.md#configure-cdi-devices) +and Buildx v0.22+. + +You also need to create the [device specifications using JSON or YAML files](https://github.com/cncf-tags/container-device-interface/blob/main/SPEC.md#cdi-json-specification) +in one of the following locations: + +* `/etc/cdi` +* `/var/run/cdi` +* `/etc/buildkit/cdi` + +> [!NOTE] +> Location can be changed by setting the `specDirs` option in the `cdi` section +> of the [`buildkitd.toml` configuration file](../buildkit/configure.md) if you +> are using BuildKit directly. If you're building using the Docker Daemon with +> the `docker` driver, see [Configure CDI devices](/reference/cli/dockerd.md#configure-cdi-devices) +> documentation. + +> [!NOTE] +> If you are creating a container builder on WSL, you need to ensure that +> [Docker Desktop](../../desktop/_index.md) is installed and [WSL 2 GPU Paravirtualization](../../desktop/features/gpu.md#prerequisites) +> is enabled. Buildx v0.27+ is also required to mount the WSL libraries in the +> container. 
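As a quick sanity check before continuing, you can confirm the Docker Engine and Buildx versions and look for existing CDI specification files. This is a sketch; the required versions and search paths are those listed above, and the output depends on your installation:

```console
$ docker version --format '{{.Server.Version}}'   # should be 27.0 or later
$ docker buildx version                           # should be v0.22 or later
$ ls /etc/cdi /var/run/cdi /etc/buildkit/cdi 2>/dev/null
```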
+ +## Building with a simple CDI specification + +Let's start with a simple CDI specification that injects an environment variable +into the build environment and write it to `/etc/cdi/foo.yaml`: + +```yaml {title="/etc/cdi/foo.yaml"} +cdiVersion: "0.6.0" +kind: "vendor1.com/device" +devices: +- name: foo + containerEdits: + env: + - FOO=injected +``` + +Inspect the `default` builder to verify that `vendor1.com/device` is detected +as a device: + +```console +$ docker buildx inspect +Name: default +Driver: docker + +Nodes: +Name: default +Endpoint: default +Status: running +BuildKit version: v0.23.2 +Platforms: linux/amd64, linux/amd64/v2, linux/amd64/v3, linux/amd64/v4, linux/386 +Labels: + org.mobyproject.buildkit.worker.moby.host-gateway-ip: 172.17.0.1 +Devices: + Name: vendor1.com/device=foo + Automatically allowed: false +GC Policy rule#0: + All: false + Filters: type==source.local,type==exec.cachemount,type==source.git.checkout + Keep Duration: 48h0m0s + Max Used Space: 658.9MiB +GC Policy rule#1: + All: false + Keep Duration: 1440h0m0s + Reserved Space: 4.657GiB + Max Used Space: 953.7MiB + Min Free Space: 2.794GiB +GC Policy rule#2: + All: false + Reserved Space: 4.657GiB + Max Used Space: 953.7MiB + Min Free Space: 2.794GiB +GC Policy rule#3: + All: true + Reserved Space: 4.657GiB + Max Used Space: 953.7MiB + Min Free Space: 2.794GiB +``` + +Now let's create a Dockerfile to use this device: + +```dockerfile +# syntax=docker/dockerfile:1-labs +FROM busybox +RUN --device=vendor1.com/device \ + env | grep ^FOO= +``` + +Here we use the [`RUN --device` command](/reference/dockerfile.md#run---device) +and set `vendor1.com/device` which requests the first device available in the +specification. In this case it uses `foo`, which is the first device in +`/etc/cdi/foo.yaml`. + +> [!NOTE] +> [`RUN --device` command](/reference/dockerfile.md#run---device) is only +> featured in [`labs` channel](../buildkit/frontend.md#labs-channel) since +> [Dockerfile frontend v1.14.0-labs](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.14.0-labs) +> and not yet available in stable syntax. + +Now let's build this Dockerfile: + +```console +$ docker buildx build . +[+] Building 0.4s (5/5) FINISHED docker:default + => [internal] load build definition from Dockerfile 0.0s + => => transferring dockerfile: 155B 0.0s + => resolve image config for docker-image://docker/dockerfile:1-labs 0.1s + => CACHED docker-image://docker/dockerfile:1-labs@sha256:9187104f31e3a002a8a6a3209ea1f937fb7486c093cbbde1e14b0fa0d7e4f1b5 0.0s + => [internal] load metadata for docker.io/library/busybox:latest 0.1s + => [internal] load .dockerignore 0.0s + => => transferring context: 2B 0.0s +ERROR: failed to build: failed to solve: failed to load LLB: device vendor1.com/device=foo is requested by the build but not allowed +``` + +It fails because the device `vendor1.com/device=foo` is not automatically +allowed by the build as shown in the `buildx inspect` output above: + +```text +Devices: + Name: vendor1.com/device=foo + Automatically allowed: false +``` + +To allow the device, you can use the [`--allow` flag](/reference/cli/docker/buildx/build/#allow) +with the `docker buildx build` command: + +```console +$ docker buildx build --allow device . 
+``` + +Or you can set the `org.mobyproject.buildkit.device.autoallow` annotation in +the CDI specification to automatically allow the device for all builds: + +```yaml {title="/etc/cdi/foo.yaml"} +cdiVersion: "0.6.0" +kind: "vendor1.com/device" +devices: +- name: foo + containerEdits: + env: + - FOO=injected +annotations: + org.mobyproject.buildkit.device.autoallow: true +``` + +Now running the build again with the `--allow device` flag: + +```console +$ docker buildx build --progress=plain --allow device . +#0 building with "default" instance using docker driver + +#1 [internal] load build definition from Dockerfile +#1 transferring dockerfile: 159B done +#1 DONE 0.0s + +#2 resolve image config for docker-image://docker/dockerfile:1-labs +#2 DONE 0.1s + +#3 docker-image://docker/dockerfile:1-labs@sha256:9187104f31e3a002a8a6a3209ea1f937fb7486c093cbbde1e14b0fa0d7e4f1b5 +#3 CACHED + +#4 [internal] load metadata for docker.io/library/busybox:latest +#4 DONE 0.1s + +#5 [internal] load .dockerignore +#5 transferring context: 2B done +#5 DONE 0.0s + +#6 [1/2] FROM docker.io/library/busybox:latest@sha256:f85340bf132ae937d2c2a763b8335c9bab35d6e8293f70f606b9c6178d84f42b +#6 CACHED + +#7 [2/2] RUN --device=vendor1.com/device env | grep ^FOO= +#7 0.155 FOO=injected +#7 DONE 0.2s +``` + +The build is successful and the output shows that the `FOO` environment variable +was injected into the build environment as specified in the CDI specification. + +## Set up a container builder with GPU support + +In this section, we will show you how to set up a [container builder](../builders/drivers/docker-container.md) +using NVIDIA GPUs. Since Buildx v0.22, when creating a new container builder, a +GPU request is automatically added to the container builder if the host has GPU +drivers installed in the kernel. This is similar to using [`--gpus=all` with the `docker run`](/reference/cli/docker/container/run/#gpus) +command. + +Now let's create a container builder named `gpubuilder` using Buildx: + +```console +$ docker buildx create --name gpubuilder --driver-opt "image=moby/buildkit:buildx-stable-1-gpu" --bootstrap +#1 [internal] booting buildkit +#1 pulling image moby/buildkit:buildx-stable-1-gpu +#1 pulling image moby/buildkit:buildx-stable-1-gpu 1.0s done +#1 creating container buildx_buildkit_gpubuilder0 +#1 creating container buildx_buildkit_gpubuilder0 8.8s done +#1 DONE 9.8s +gpubuilder +``` + +> [!NOTE] +> We made a specially crafted BuildKit image because the current BuildKit +> release image is based on Alpine that doesn't support NVIDIA drivers. The +> following image is based on Ubuntu and installs the NVIDIA client libraries +> and generates the CDI specification for your GPU in the container builder if +> a device is requested during a build. 
+ +Let's inspect this builder: + +```console +$ docker buildx inspect gpubuilder +Name: gpubuilder +Driver: docker-container +Last Activity: 2025-07-10 08:18:09 +0000 UTC + +Nodes: +Name: gpubuilder0 +Endpoint: unix:///var/run/docker.sock +Driver Options: image="moby/buildkit:buildx-stable-1-gpu" +Status: running +BuildKit daemon flags: --allow-insecure-entitlement=network.host +BuildKit version: v0.26.2 +Platforms: linux/amd64, linux/amd64/v2, linux/amd64/v3, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/arm/v7, linux/arm/v6 +Labels: + org.mobyproject.buildkit.worker.executor: oci + org.mobyproject.buildkit.worker.hostname: d6aa9cbe8462 + org.mobyproject.buildkit.worker.network: host + org.mobyproject.buildkit.worker.oci.process-mode: sandbox + org.mobyproject.buildkit.worker.selinux.enabled: false + org.mobyproject.buildkit.worker.snapshotter: overlayfs +Devices: + Name: nvidia.com/gpu + On-Demand: true +GC Policy rule#0: + All: false + Filters: type==source.local,type==exec.cachemount,type==source.git.checkout + Keep Duration: 48h0m0s + Max Used Space: 488.3MiB +GC Policy rule#1: + All: false + Keep Duration: 1440h0m0s + Reserved Space: 9.313GiB + Max Used Space: 93.13GiB + Min Free Space: 188.1GiB +GC Policy rule#2: + All: false + Reserved Space: 9.313GiB + Max Used Space: 93.13GiB + Min Free Space: 188.1GiB +GC Policy rule#3: + All: true + Reserved Space: 9.313GiB + Max Used Space: 93.13GiB + Min Free Space: 188.1GiB +``` + +We can see `nvidia.com/gpu` vendor is detected as a device in the builder which +means that drivers were detected. + +Optionally you can check if NVIDIA GPU devices are available in the container +using `nvidia-smi`: + +```console +$ docker exec -it buildx_buildkit_gpubuilder0 nvidia-smi -L +GPU 0: Tesla T4 (UUID: GPU-6cf00fa7-59ac-16f2-3e83-d24ccdc56f84) +``` + +## Building with GPU support + +Let's create a simple Dockerfile that will use the GPU device: + +```dockerfile +# syntax=docker/dockerfile:1-labs +FROM ubuntu +RUN --device=nvidia.com/gpu nvidia-smi -L +``` + +Now run the build using the `gpubuilder` builder we created earlier: + +```console +$ docker buildx --builder gpubuilder build --progress=plain . +#0 building with "gpubuilder" instance using docker-container driver +... + +#7 preparing device nvidia.com/gpu +#7 0.000 > apt-get update +... +#7 4.872 > apt-get install -y gpg +... +#7 10.16 Downloading NVIDIA GPG key +#7 10.21 > apt-get update +... +#7 12.15 > apt-get install -y --no-install-recommends nvidia-container-toolkit-base +... +#7 17.80 time="2025-04-15T08:58:16Z" level=info msg="Generated CDI spec with version 0.8.0" +#7 DONE 17.8s + +#8 [2/2] RUN --device=nvidia.com/gpu nvidia-smi -L +#8 0.527 GPU 0: Tesla T4 (UUID: GPU-6cf00fa7-59ac-16f2-3e83-d24ccdc56f84) +#8 DONE 1.6s +``` + +As you might have noticed, the step `#7` is preparing the `nvidia.com/gpu` +device by installing client libraries and the toolkit to generate the CDI +specifications for the GPU. + +The `nvidia-smi -L` command is then executed in the container using the GPU +device. The output shows the GPU UUID. 
+ +You can check the generated CDI specification within the container builder with +the following command: + +```console +$ docker exec -it buildx_buildkit_gpubuilder0 cat /etc/cdi/nvidia.yaml +``` + +For the EC2 instance [`g4dn.xlarge`](https://aws.amazon.com/ec2/instance-types/g4/) +used here, it looks like this: + +```yaml {collapse=true} +cdiVersion: 0.6.0 +containerEdits: + deviceNodes: + - path: /dev/nvidia-modeset + - path: /dev/nvidia-uvm + - path: /dev/nvidia-uvm-tools + - path: /dev/nvidiactl + env: + - NVIDIA_VISIBLE_DEVICES=void + hooks: + - args: + - nvidia-cdi-hook + - create-symlinks + - --link + - ../libnvidia-allocator.so.1::/usr/lib/x86_64-linux-gnu/gbm/nvidia-drm_gbm.so + hookName: createContainer + path: /usr/bin/nvidia-cdi-hook + - args: + - nvidia-cdi-hook + - create-symlinks + - --link + - libcuda.so.1::/usr/lib/x86_64-linux-gnu/libcuda.so + hookName: createContainer + path: /usr/bin/nvidia-cdi-hook + - args: + - nvidia-cdi-hook + - enable-cuda-compat + - --host-driver-version=570.133.20 + hookName: createContainer + path: /usr/bin/nvidia-cdi-hook + - args: + - nvidia-cdi-hook + - update-ldcache + - --folder + - /usr/lib/x86_64-linux-gnu + hookName: createContainer + path: /usr/bin/nvidia-cdi-hook + mounts: + - containerPath: /run/nvidia-persistenced/socket + hostPath: /run/nvidia-persistenced/socket + options: + - ro + - nosuid + - nodev + - bind + - noexec + - containerPath: /usr/bin/nvidia-cuda-mps-control + hostPath: /usr/bin/nvidia-cuda-mps-control + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/bin/nvidia-cuda-mps-server + hostPath: /usr/bin/nvidia-cuda-mps-server + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/bin/nvidia-debugdump + hostPath: /usr/bin/nvidia-debugdump + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/bin/nvidia-persistenced + hostPath: /usr/bin/nvidia-persistenced + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/bin/nvidia-smi + hostPath: /usr/bin/nvidia-smi + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/lib/x86_64-linux-gnu/libcuda.so.570.133.20 + hostPath: /usr/lib/x86_64-linux-gnu/libcuda.so.570.133.20 + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/lib/x86_64-linux-gnu/libcudadebugger.so.570.133.20 + hostPath: /usr/lib/x86_64-linux-gnu/libcudadebugger.so.570.133.20 + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/lib/x86_64-linux-gnu/libnvidia-allocator.so.570.133.20 + hostPath: /usr/lib/x86_64-linux-gnu/libnvidia-allocator.so.570.133.20 + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/lib/x86_64-linux-gnu/libnvidia-cfg.so.570.133.20 + hostPath: /usr/lib/x86_64-linux-gnu/libnvidia-cfg.so.570.133.20 + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/lib/x86_64-linux-gnu/libnvidia-gpucomp.so.570.133.20 + hostPath: /usr/lib/x86_64-linux-gnu/libnvidia-gpucomp.so.570.133.20 + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/lib/x86_64-linux-gnu/libnvidia-ml.so.570.133.20 + hostPath: /usr/lib/x86_64-linux-gnu/libnvidia-ml.so.570.133.20 + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/lib/x86_64-linux-gnu/libnvidia-nscq.so.570.133.20 + hostPath: /usr/lib/x86_64-linux-gnu/libnvidia-nscq.so.570.133.20 + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/lib/x86_64-linux-gnu/libnvidia-nvvm.so.570.133.20 + hostPath: /usr/lib/x86_64-linux-gnu/libnvidia-nvvm.so.570.133.20 + options: + 
- ro + - nosuid + - nodev + - bind + - containerPath: /usr/lib/x86_64-linux-gnu/libnvidia-opencl.so.570.133.20 + hostPath: /usr/lib/x86_64-linux-gnu/libnvidia-opencl.so.570.133.20 + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/lib/x86_64-linux-gnu/libnvidia-pkcs11-openssl3.so.570.133.20 + hostPath: /usr/lib/x86_64-linux-gnu/libnvidia-pkcs11-openssl3.so.570.133.20 + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/lib/x86_64-linux-gnu/libnvidia-pkcs11.so.570.133.20 + hostPath: /usr/lib/x86_64-linux-gnu/libnvidia-pkcs11.so.570.133.20 + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /usr/lib/x86_64-linux-gnu/libnvidia-ptxjitcompiler.so.570.133.20 + hostPath: /usr/lib/x86_64-linux-gnu/libnvidia-ptxjitcompiler.so.570.133.20 + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /lib/firmware/nvidia/570.133.20/gsp_ga10x.bin + hostPath: /lib/firmware/nvidia/570.133.20/gsp_ga10x.bin + options: + - ro + - nosuid + - nodev + - bind + - containerPath: /lib/firmware/nvidia/570.133.20/gsp_tu10x.bin + hostPath: /lib/firmware/nvidia/570.133.20/gsp_tu10x.bin + options: + - ro + - nosuid + - nodev + - bind +devices: +- containerEdits: + deviceNodes: + - path: /dev/nvidia0 + name: "0" +- containerEdits: + deviceNodes: + - path: /dev/nvidia0 + name: GPU-6cf00fa7-59ac-16f2-3e83-d24ccdc56f84 +- containerEdits: + deviceNodes: + - path: /dev/nvidia0 + name: all +kind: nvidia.com/gpu +``` + +Congrats on your first build using a GPU device with BuildKit and CDI. diff --git a/content/manuals/build/building/export.md b/content/manuals/build/building/export.md index 1b813c3f890..e507c5588c7 100644 --- a/content/manuals/build/building/export.md +++ b/content/manuals/build/building/export.md @@ -18,7 +18,7 @@ from that image, or push it to a registry. Under the hood, this uses the default exporter, called the `docker` exporter. To export your build results as files instead, you can use the `--output` flag, -or `-o` for short. the `--output` flag lets you change the output format of +or `-o` for short. The `--output` flag lets you change the output format of your build. ## Export binaries from a build diff --git a/content/manuals/build/building/multi-platform.md b/content/manuals/build/building/multi-platform.md index e60db90b380..9f35400e5bb 100644 --- a/content/manuals/build/building/multi-platform.md +++ b/content/manuals/build/building/multi-platform.md @@ -5,11 +5,11 @@ weight: 40 description: Introduction to what multi-platform builds are and how to execute them using Docker Buildx. keywords: build, buildx, buildkit, multi-platform, cross-platform, cross-compilation, emulation, QEMU, ARM, x86, Windows, Linux, macOS aliases: -- /build/buildx/multiplatform-images/ -- /desktop/multi-arch/ -- /docker-for-mac/multi-arch/ -- /mackit/multi-arch/ -- /build/guide/multi-platform/ + - /build/buildx/multiplatform-images/ + - /desktop/multi-arch/ + - /docker-for-mac/multi-arch/ + - /mackit/multi-arch/ + - /build/guide/multi-platform/ --- A multi-platform build refers to a single build invocation that targets @@ -55,40 +55,26 @@ selects the `linux/amd64` variant (if you're using Linux containers). ## Prerequisites -To build multi-platform images, you first need to make sure that your Docker -environment is set up to support it. There are two ways you can do that: +Multi-platform images require an image store that supports manifest lists. 
+Docker Desktop and Docker Engine 29.0+ use the +[containerd image store](/manuals/desktop/features/containerd.md) by default, +which supports multi-platform images out of the box. If you're using one of +these versions, no additional setup is needed. -- You can switch from the "classic" image store to the containerd image store. -- You can create and use a custom builder. +If you're using an older version of Docker Engine, or if you upgraded from an +older version that still uses classic storage drivers, you have two options: -The "classic" image store of the Docker Engine does not support multi-platform -images. Switching to the containerd image store ensures that your Docker Engine -can push, pull, and build multi-platform images. +- Enable the containerd image store using the + [daemon configuration file](/manuals/engine/storage/containerd.md). +- Create a custom builder using the `docker-container` driver (see the following section). -Creating a custom builder that uses a driver with multi-platform support, -such as the `docker-container` driver, will let you build multi-platform images -without switching to a different image store. However, you still won't be able -to load the multi-platform images you build into your Docker Engine image -store. But you can push them to a container registry directly with `docker -build --push`. +### Custom builder -{{< tabs >}} -{{< tab name="containerd image store" >}} - -The steps for enabling the containerd image store depends on whether you're -using Docker Desktop or Docker Engine standalone: - -- If you're using Docker Desktop, enable the containerd image store in the - [Docker Desktop settings](/manuals/desktop/features/containerd.md). - -- If you're using Docker Engine standalone, enable the containerd image store - using the [daemon configuration file](/manuals/engine/storage/containerd.md). - -{{< /tab >}} -{{< tab name="Custom builder" >}} - -To create a custom builder, use the `docker buildx create` command to create a -builder that uses the `docker-container` driver. +As an alternative to using the containerd image store, you can create a custom +builder that uses the `docker-container` driver. This driver supports +multi-platform builds, but the resulting images aren't loaded into your Docker +Engine image store. You can push them to a container registry directly with +`docker build --push`. ```console $ docker buildx create \ @@ -102,9 +88,6 @@ $ docker buildx create \ > Docker Engine image store. For more information, see [Build > drivers](/manuals/build/builders/drivers/_index.md). -{{< /tab >}} -{{< /tabs >}} - If you're using Docker Engine standalone and you need to build multi-platform images using emulation, you also need to install QEMU, see [Install QEMU manually](#install-qemu-manually). @@ -257,7 +240,6 @@ architecture of the container. Prerequisites: - Docker Desktop, or Docker Engine with [QEMU installed](#install-qemu-manually) -- containerd image store enabled Steps: @@ -331,7 +313,7 @@ Steps: unzip ADD https://github.com/neovim/neovim.git#stable . RUN make CMAKE_BUILD_TYPE=RelWithDebInfo - + FROM scratch COPY --from=build /work/build/bin/nvim / ``` @@ -358,7 +340,7 @@ Steps: │   └── nvim └── linux_arm64 └── nvim - + 3 directories, 2 files ``` @@ -399,7 +381,7 @@ Steps: WORKDIR /app ADD https://github.com/dvdksn/buildme.git#eb6279e0ad8a10003718656c6867539bd9426ad8 . RUN go build -o server . - + FROM alpine COPY --from=build /app/server /server ENTRYPOINT ["/server"] @@ -411,10 +393,7 @@ Steps: platforms. 3. 
To add cross-compilation support, update the Dockerfile to use the - pre-defined `BUILDPLATFORM` and `TARGETPLATFORM` build arguments. These - arguments are automatically available in the Dockerfile when you use the - `--platform` flag with `docker build`. - + pre-defined `BUILDPLATFORM`, `TARGETOS` and `TARGETARCH` build arguments. - Pin the `golang` image to the platform of the builder using the `--platform=$BUILDPLATFORM` option. - Add `ARG` instructions for the Go compilation stages to make the @@ -435,7 +414,7 @@ Steps: WORKDIR /app ADD https://github.com/dvdksn/buildme.git#eb6279e0ad8a10003718656c6867539bd9426ad8 . RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o server . - + FROM alpine COPY --from=build /app/server /server ENTRYPOINT ["/server"] @@ -450,7 +429,7 @@ Steps: WORKDIR /app ADD https://github.com/dvdksn/buildme.git#eb6279e0ad8a10003718656c6867539bd9426ad8 . RUN go build -o server . - + FROM alpine COPY --from=build /app/server /server ENTRYPOINT ["/server"] @@ -469,7 +448,7 @@ Steps: ADD https://github.com/dvdksn/buildme.git#eb6279e0ad8a10003718656c6867539bd9426ad8 . -RUN go build -o server . +RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o server . - + FROM alpine COPY --from=build /app/server /server ENTRYPOINT ["/server"] diff --git a/content/manuals/build/building/secrets.md b/content/manuals/build/building/secrets.md index b262d39e460..fdaef1e9fea 100644 --- a/content/manuals/build/building/secrets.md +++ b/content/manuals/build/building/secrets.md @@ -37,7 +37,7 @@ First you need to pass the secret into the `docker build` command, and then you need to consume the secret in your Dockerfile. To pass a secret to a build, use the [`docker build --secret` -flag](/reference/cli/docker/buildx/build.md#secret), or the +flag](/reference/cli/docker/buildx/build/#secret), or the equivalent options for [Bake](../bake/reference.md#targetsecret). {{< tabs >}} @@ -84,8 +84,8 @@ builds, such as API tokens, passwords, or SSH keys. ### Sources The source of a secret can be either a -[file](/reference/cli/docker/buildx/build.md#file) or an -[environment variable](/reference/cli/docker/buildx/build.md#env). +[file](/reference/cli/docker/buildx/build/#file) or an +[environment variable](/reference/cli/docker/buildx/build/#typeenv). When you use the CLI or Bake, the type can be detected automatically. You can also specify it explicitly with `type=file` or `type=env`. @@ -159,7 +159,7 @@ ADD git@github.com:me/myprivaterepo.git /src/ ``` To pass an SSH socket the build, you use the [`docker build --ssh` -flag](/reference/cli/docker/buildx/build.md#ssh), or equivalent +flag](/reference/cli/docker/buildx/build/#ssh), or equivalent options for [Bake](../bake/reference.md#targetssh). ```console @@ -175,29 +175,29 @@ building with remote, private Git repositories, including: - Building with a private Git repository as build context - Fetching private Git repositories in a build with `ADD` -For example, say you have a private GitLab project at -`https://gitlab.com/example/todo-app.git`, and you want to run a build using +For example, say you have a private GitHub repository at +`https://github.com/example/todo-app.git`, and you want to run a build using that repository as the build context. 
An unauthenticated `docker build` command fails because the builder isn't authorized to pull the repository: ```console -$ docker build https://gitlab.com/example/todo-app.git +$ docker build https://github.com/example/todo-app.git [+] Building 0.4s (1/1) FINISHED - => ERROR [internal] load git source https://gitlab.com/example/todo-app.git + => ERROR [internal] load git source https://github.com/example/todo-app.git ------ - > [internal] load git source https://gitlab.com/example/todo-app.git: -0.313 fatal: could not read Username for 'https://gitlab.com': terminal prompts disabled + > [internal] load git source https://github.com/example/todo-app.git: +0.313 fatal: could not read Username for 'https://github.com': terminal prompts disabled ------ ``` -To authenticate the builder to the Git server, set the `GIT_AUTH_TOKEN` -environment variable to contain a valid GitLab access token, and pass it as a +To authenticate the builder to GitHub, set the `GIT_AUTH_TOKEN` +environment variable to contain a valid GitHub access token, and pass it as a secret to the build: ```console -$ GIT_AUTH_TOKEN=$(cat gitlab-token.txt) docker build \ +$ GIT_AUTH_TOKEN=$(gh auth token) docker build \ --secret id=GIT_AUTH_TOKEN \ - https://gitlab.com/example/todo-app.git + https://github.com/example/todo-app.git ``` The `GIT_AUTH_TOKEN` also works with `ADD` to fetch private Git repositories as @@ -205,31 +205,50 @@ part of your build: ```dockerfile FROM alpine -ADD https://gitlab.com/example/todo-app.git /src +ADD https://github.com/example/todo-app.git /src ``` ### HTTP authentication scheme -By default, Git authentication over HTTP uses the Bearer authentication scheme: +BuildKit supports two Git authentication secrets: + +- **`GIT_AUTH_TOKEN`**: Uses Basic authentication with a fixed username of `x-access-token` (the GitHub-style default) +- **`GIT_AUTH_HEADER`**: Uses the raw authorization header value you provide (works with any Git provider) + +#### Using GIT_AUTH_TOKEN (for example, GitHub) + +When you use `GIT_AUTH_TOKEN`, BuildKit constructs a Basic authentication header using `x-access-token` as the user: ```http -Authorization: Bearer +Authorization: Basic ")> ``` -If you need to use a Basic scheme, with a username and password, you can set -the `GIT_AUTH_HEADER` build secret: +This method works for providers that accept the `x-access-token` Basic auth pattern, such as GitHub. Example usage: ```console -$ export GIT_AUTH_TOKEN=$(cat gitlab-token.txt) -$ export GIT_AUTH_HEADER=basic +$ export GIT_AUTH_TOKEN=$(gh auth token) $ docker build \ --secret id=GIT_AUTH_TOKEN \ + https://github.com/example/todo-app.git +``` + +#### Using GIT_AUTH_HEADER (custom authorization header) + +When you use `GIT_AUTH_HEADER`, BuildKit uses the exact value you provide as the `Authorization` header: + +```http +Authorization: +``` + +Example usage with GitLab CI/CD token: + +```console +$ export GIT_AUTH_HEADER="Basic $(echo -n "gitlab-ci-token:${CI_JOB_TOKEN}" | base64)" +$ docker build \ --secret id=GIT_AUTH_HEADER \ https://gitlab.com/example/todo-app.git ``` -BuildKit currently only supports the Bearer and Basic schemes. - ### Multiple hosts You can set the `GIT_AUTH_TOKEN` and `GIT_AUTH_HEADER` secrets on a per-host @@ -238,12 +257,10 @@ hostnames. 
To specify a hostname, append the hostname as a suffix to the secret ID: ```console -$ export GITLAB_TOKEN=$(cat gitlab-token.txt) -$ export GERRIT_TOKEN=$(cat gerrit-username-password.txt) -$ export GERRIT_SCHEME=basic +$ export GITHUB_TOKEN=$(gh auth token) +$ export GITLAB_AUTH_HEADER="Basic $(echo -n "gitlab-ci-token:${CI_JOB_TOKEN}" | base64)" $ docker build \ - --secret id=GIT_AUTH_TOKEN.gitlab.com,env=GITLAB_TOKEN \ - --secret id=GIT_AUTH_TOKEN.gerrit.internal.example,env=GERRIT_TOKEN \ - --secret id=GIT_AUTH_HEADER.gerrit.internal.example,env=GERRIT_SCHEME \ - https://gitlab.com/example/todo-app.git + --secret id=GIT_AUTH_TOKEN.github.com,env=GITHUB_TOKEN \ + --secret id=GIT_AUTH_HEADER.gitlab.com,env=GITLAB_AUTH_HEADER \ + https://github.com/example/todo-app.git ``` diff --git a/content/manuals/build/building/variables.md b/content/manuals/build/building/variables.md index 28662a9b19e..db41b830f03 100644 --- a/content/manuals/build/building/variables.md +++ b/content/manuals/build/building/variables.md @@ -119,7 +119,7 @@ $ docker build --build-arg NODE_VERSION=current . For more information on how to use build arguments, refer to: - [`ARG` Dockerfile reference](/reference/dockerfile.md#arg) -- [`docker build --build-arg` reference](/reference/cli/docker/buildx/build.md#build-arg) +- [`docker build --build-arg` reference](/reference/cli/docker/buildx/build/#build-arg) ## `ENV` usage example @@ -310,6 +310,8 @@ They're used to configure the Buildx client, or the BuildKit daemon. | [BUILDKIT_HOST](#buildkit_host) | String | Specify host to use for remote builders. | | [BUILDKIT_PROGRESS](#buildkit_progress) | String | Configure type of progress output. | | [BUILDKIT_TTY_LOG_LINES](#buildkit_tty_log_lines) | String | Number of log lines (for active steps in TTY mode). | +| [BUILDX_BAKE_FILE](#buildx_bake_file) | String | Specify the build definition file(s) for `docker buildx bake`. | +| [BUILDX_BAKE_FILE_SEPARATOR](#buildx_bake_file_separator) | String | Specify the file-path separator for `BUILDX_BAKE_FILE`. | | [BUILDX_BAKE_GIT_AUTH_HEADER](#buildx_bake_git_auth_header) | String | HTTP authentication scheme for remote Bake files. | | [BUILDX_BAKE_GIT_AUTH_TOKEN](#buildx_bake_git_auth_token) | String | HTTP authentication token for remote Bake files. | | [BUILDX_BAKE_GIT_SSH](#buildx_bake_git_ssh) | String | SSH authentication for remote Bake files. | @@ -321,7 +323,7 @@ They're used to configure the Buildx client, or the BuildKit daemon. | [BUILDX_GIT_INFO](#buildx_git_info) | Boolean | Remove Git information in provenance attestations. | | [BUILDX_GIT_LABELS](#buildx_git_labels) | String \| Boolean | Add Git provenance labels to images. | | [BUILDX_MEM_PROFILE](#buildx_mem_profile) | String | Generate a `pprof` memory profile at the specified location. | -| [BUILDX_METADATA_PROVENANCE](#buildx_metadata_provenance) | String \| Boolean | Customize provenance informations included in the metadata file. | +| [BUILDX_METADATA_PROVENANCE](#buildx_metadata_provenance) | String \| Boolean | Customize provenance information included in the metadata file. | | [BUILDX_METADATA_WARNINGS](#buildx_metadata_warnings) | String | Include build warnings in the metadata file. | | [BUILDX_NO_DEFAULT_ATTESTATIONS](#buildx_no_default_attestations) | Boolean | Turn off default provenance attestations. | | [BUILDX_NO_DEFAULT_LOAD](#buildx_no_default_load) | Boolean | Turn off loading images to image store by default. | @@ -374,11 +376,12 @@ argument, the argument takes priority. 
Sets the type of the BuildKit progress output. Valid values are: -- `auto` (default) -- `plain` -- `tty` -- `quiet` -- `rawjson` +- `auto` (default): automatically uses `tty` in interactive terminals, `plain` otherwise +- `plain`: displays build steps sequentially in simple text format +- `tty`: interactive output with formatted progress bars and build steps +- `quiet`: suppresses progress output, only shows errors and final image ID +- `none`: no progress output, only shows errors +- `rawjson`: outputs build progress as raw JSON (useful for parsing by other tools) Usage: @@ -438,6 +441,44 @@ Example: } ``` +### BUILDX_BAKE_FILE + +{{< summary-bar feature_name="Buildx bake file" >}} + +Specify one or more build definition files for `docker buildx bake`. + +This environment variable provides an alternative to the `-f` / `--file` command-line flag. + +Multiple files can be specified by separating them with the system path separator (":" on Linux/macOS, ";" on Windows): + +```console +export BUILDX_BAKE_FILE=file1.hcl:file2.hcl +``` + +Or with a custom separator defined by the [BUILDX_BAKE_FILE_SEPARATOR](#buildx_bake_file_separator) variable: + +```console +export BUILDX_BAKE_FILE_SEPARATOR=@ +export BUILDX_BAKE_FILE=file1.hcl@file2.hcl +``` + +If both `BUILDX_BAKE_FILE` and the `-f` flag are set, only the files provided via `-f` are used. + +If a listed file does not exist or is invalid, bake returns an error. + +### BUILDX_BAKE_FILE_SEPARATOR + +{{< summary-bar feature_name="Buildx bake file separator" >}} + +Controls the separator used between file paths in the `BUILDX_BAKE_FILE` environment variable. + +This is useful if your file paths contain the default separator character or if you want to standardize separators across different platforms. + +```console +export BUILDX_BAKE_PATH_SEPARATOR=@ +export BUILDX_BAKE_FILE=file1.hcl@file2.hcl +``` + ### BUILDX_BAKE_GIT_AUTH_HEADER {{< summary-bar feature_name="Buildx bake Git auth token" >}} diff --git a/content/manuals/build/buildkit/_index.md b/content/manuals/build/buildkit/_index.md index 89d75f2ad53..20bbddd3092 100644 --- a/content/manuals/build/buildkit/_index.md +++ b/content/manuals/build/buildkit/_index.md @@ -5,14 +5,10 @@ description: Introduction and overview of BuildKit keywords: build, buildkit --- -## Overview - -[BuildKit](https://github.com/moby/buildkit) -is an improved backend to replace the legacy builder. BuildKit is the default builder -for users on Docker Desktop, and Docker Engine as of version 23.0. - -BuildKit provides new functionality and improves your builds' performance. -It also introduces support for handling more complex scenarios: +[BuildKit](https://github.com/moby/buildkit) is the builder backend used by +Docker. BuildKit provides improved functionality and improves your builds' +performance over the legacy builder used in earlier versions of Docker. 
It also +introduces support for handling more complex scenarios: - Detect and skip executing unused build stages - Parallelize building independent build stages @@ -21,16 +17,16 @@ It also introduces support for handling more complex scenarios: - Detect and skip transferring unused files in your [build context](../concepts/context.md) - Use [Dockerfile frontend](frontend.md) implementations with many - new features + additional features - Avoid side effects with rest of the API (intermediate images and containers) - Prioritize your build cache for automatic pruning -Apart from many new features, the main areas BuildKit improves on the current -experience are performance, storage management, and extensibility. From the -performance side, a significant update is a new fully concurrent build graph -solver. It can run build steps in parallel when possible and optimize out -commands that don't have an impact on the final result. We have also optimized -the access to the local source files. By tracking only the updates made to these +The main areas BuildKit improves on the legacy builder are performance, storage +management, and extensibility. From the performance side, a significant update +is a fully concurrent build graph solver. It can run build steps in parallel when possible and optimize out +commands that don't have an impact on the final result. +The access to the local source files has also been optimized. By tracking +only the updates made to these files between repeated build invocations, there is no need to wait for local files to be read or uploaded before the work can begin. @@ -39,7 +35,7 @@ files to be read or uploaded before the work can begin. At the core of BuildKit is a [Low-Level Build (LLB)](https://github.com/moby/buildkit#exploring-llb) definition format. LLB is an intermediate binary format that allows developers to extend BuildKit. LLB defines a content-addressable -dependency graph that can be used to put together very complex build +dependency graph that can be used to put together complex build definitions. It also supports features not exposed in Dockerfiles, like direct data mounting and nested invocation. @@ -72,39 +68,8 @@ BuildKit, you would ## Getting started -BuildKit is the default builder for users on Docker Desktop and Docker Engine -v23.0 and later. - -If you have installed Docker Desktop, you don't need to enable BuildKit. If you -are running a version of Docker Engine version earlier than 23.0, you can enable -BuildKit either by setting an environment variable, or by making BuildKit the -default setting in the daemon configuration. - -To set the BuildKit environment variable when running the `docker build` -command, run: - -```console -$ DOCKER_BUILDKIT=1 docker build . -``` - -> [!NOTE] -> -> Buildx always uses BuildKit. - -To use Docker BuildKit by default, edit the Docker daemon configuration in -`/etc/docker/daemon.json` as follows, and restart the daemon. - -```json -{ - "features": { - "buildkit": true - } -} -``` - -If the `/etc/docker/daemon.json` file doesn't exist, create new file called -`daemon.json` and then add the following to the file. And restart the Docker -daemon. +BuildKit is the default builder for Docker Desktop and Docker Engine users. +If you're building Windows containers, the legacy builder is used instead. ## BuildKit on Windows @@ -115,7 +80,7 @@ daemon. BuildKit has experimental support for Windows containers (WCOW) as of version 0.13. This section walks you through the steps for trying it out. 
-We appreciate any feedback you submit by [opening an issue here](https://github.com/moby/buildkit/issues/new), especially `buildkitd.exe`. +To share feedback, [open an issue in the repository](https://github.com/moby/buildkit/issues/new), especially `buildkitd.exe`. ### Known limitations @@ -149,12 +114,12 @@ see [GitHub issues](https://github.com/moby/buildkit/issues?q=is%3Aissue%20state Select the Docker icon in the taskbar, and then **Switch to Windows containers...**. -3. Install containerd version 1.7.7 or later following the setup instructions [here](https://github.com/containerd/containerd/blob/main/docs/getting-started.md#installing-containerd-on-windows). +3. Install containerd version 1.7.7 or later following the [setup instructions](https://github.com/containerd/containerd/blob/main/docs/getting-started.md#installing-containerd-on-windows). 4. Download and extract the latest BuildKit release. ```powershell - $version = "v0.13.1" # specify the release version, v0.13+ + $version = "v0.22.0" # specify the release version, v0.13+ $arch = "amd64" # arm64 binary available too curl.exe -LO https://github.com/moby/buildkit/releases/download/$version/buildkit-$version.windows-$arch.tar.gz # there could be another `.\bin` directory from containerd instructions @@ -179,12 +144,17 @@ see [GitHub issues](https://github.com/moby/buildkit/issues?q=is%3Aissue%20state $Env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + ` [System.Environment]::GetEnvironmentVariable("Path","User") ``` + 6. Start the BuildKit daemon. ```console > buildkitd.exe ``` + > [!NOTE] + > If you are running a _dockerd-managed_ `containerd` process, use that instead, by supplying the address: + > `buildkitd.exe --containerd-worker-addr "npipe:////./pipe/docker-containerd"` + 7. In another terminal with administrator privileges, create a remote builder that uses the local BuildKit daemon. > [!NOTE] diff --git a/content/manuals/build/buildkit/configure.md b/content/manuals/build/buildkit/configure.md index bef1959c75b..c05b2b2dbd0 100644 --- a/content/manuals/build/buildkit/configure.md +++ b/content/manuals/build/buildkit/configure.md @@ -6,8 +6,8 @@ keywords: build, buildkit, configuration, buildx, network, cni, registry If you create a `docker-container` or `kubernetes` builder with Buildx, you can apply a custom [BuildKit configuration](toml-configuration.md) by passing the -[`--config` flag](/reference/cli/docker/buildx/create.md#config) to -the `docker buildx create` command. +[`--buildkitd-config` flag](/reference/cli/docker/buildx/create/#buildkitd-config) +to the `docker buildx create` command. ## Registry mirror @@ -34,7 +34,7 @@ defining a mirror for `docker.io` (Docker Hub) to `mirror.gcr.io`. $ docker buildx create --use --bootstrap \ --name mybuilder \ --driver docker-container \ - --config /etc/buildkitd.toml + --buildkitd-config /etc/buildkitd.toml ``` 3. Build an image: @@ -96,7 +96,7 @@ configuration. $ docker buildx create --use --bootstrap \ --name mybuilder \ --driver docker-container \ - --config /etc/buildkitd.toml + --buildkitd-config /etc/buildkitd.toml ``` 3. 
Inspect the builder's configuration file (`/etc/buildkit/buildkitd.toml`), it @@ -166,7 +166,7 @@ ADD https://raw.githubusercontent.com/moby/buildkit/${BUILDKIT_VERSION}/hack/fix ``` Now you can build this image, and create a builder instance from it using -[the `--driver-opt image` option](/reference/cli/docker/buildx/create.md#driver-opt): +[the `--driver-opt image` option](/reference/cli/docker/buildx/create/#driver-opt): ```console $ docker buildx build --tag buildkit-cni:local --load . @@ -183,7 +183,7 @@ $ docker buildx create --use --bootstrap \ You can limit the parallelism of the BuildKit solver, which is particularly useful for low-powered machines, using a [BuildKit configuration](toml-configuration.md) -while creating a builder with the [`--config` flags](/reference/cli/docker/buildx/create.md#config). +while creating a builder with the [`--buildkitd-config` flag](/reference/cli/docker/buildx/create/#buildkitd-config). ```toml # /etc/buildkitd.toml @@ -198,7 +198,7 @@ that will use this BuildKit configuration to limit parallelism. $ docker buildx create --use \ --name mybuilder \ --driver docker-container \ - --config /etc/buildkitd.toml + --buildkitd-config /etc/buildkitd.toml ``` ### TCP connection limit diff --git a/content/manuals/build/buildkit/dockerfile-release-notes.md b/content/manuals/build/buildkit/dockerfile-release-notes.md index 35e5a586b04..404a1a0528c 100644 --- a/content/manuals/build/buildkit/dockerfile-release-notes.md +++ b/content/manuals/build/buildkit/dockerfile-release-notes.md @@ -1,564 +1,8 @@ --- title: Dockerfile release notes -description: Release notes for Dockerfile frontend -keywords: build, dockerfile, frontend, release notes -tags: [Release notes] -toc_max: 2 +params: + sidebar: + goto: "https://github.com/moby/buildkit/releases" aliases: - /build/dockerfile/release-notes/ --- - -This page contains information about the new features, improvements, known -issues, and bug fixes in [Dockerfile reference](/reference/dockerfile.md). - -For usage, see the [Dockerfile frontend syntax](frontend.md) page. - -## 1.15.0 - -{{< release-date date="2025-04-15" >}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.15.0). - -```dockerfile -# syntax=docker/dockerfile:1.15.0 -``` - -- Build error for invalid target now shows suggestions for correct possible names. [moby/buildkit#5851](https://github.com/moby/buildkit/pull/5851) -- Fix SBOM attestation producing error for Windows targets. [moby/buildkit#5837](https://github.com/moby/buildkit/pull/5837) -- Fix recursive `ARG` producing an infinite loop when processing an outline request. [moby/buildkit#5823](https://github.com/moby/buildkit/pull/5823) -- Fix parsing syntax directive from JSON that would fail if the JSON had other datatypes than strings. [moby/buildkit#5815](https://github.com/moby/buildkit/pull/5815) -- Fix platform in image config being in unnormalized form (regression from 1.12). [moby/buildkit#5776](https://github.com/moby/buildkit/pull/5776) -- Fix copying into destination directory when directory is not present with WCOW. [moby/buildkit#5249](https://github.com/moby/buildkit/pull/5249) - -## 1.14.1 - -{{< release-date date="2025-03-05" >}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.14.1). - -```dockerfile -# syntax=docker/dockerfile:1.14.1 -``` - -- Normalize platform in image config. 
[moby/buildkit#5776](https://github.com/moby/buildkit/pull/5776) - -## 1.14.0 - -{{< release-date date="2025-02-19" >}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.14.0). - -```dockerfile -# syntax=docker/dockerfile:1.14.0 -``` - -- `COPY --chmod` now allows non-octal values. This feature was previously in the labs channel and is now available in the main release. [moby/buildkit#5734](https://github.com/moby/buildkit/pull/5734) -- Fix handling of OSVersion platform property if one is set by the base image [moby/buildkit#5714](https://github.com/moby/buildkit/pull/5714) -- Fix errors where a named context metadata could be resolved even if it was not reachable by the current build configuration, leading to build errors [moby/buildkit#5688](https://github.com/moby/buildkit/pull/5688) - -## 1.14.0 (labs) - -{{< release-date date="2025-02-19" >}} - -{{% include "dockerfile-labs-channel.md" %}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.14.0-labs). - -```dockerfile -# syntax=docker.io/docker/dockerfile-upstream:1.14.0-labs -``` - -- New `RUN --device=name,[required]` flag lets builds request CDI devices are available to the build step. Requires BuildKit v0.20.0+ [moby/buildkit#4056](https://github.com/moby/buildkit/pull/4056), [moby/buildkit#5738](https://github.com/moby/buildkit/pull/5738) - -## 1.13.0 - -{{< release-date date="2025-01-20" >}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.13.0). - -```dockerfile -# syntax=docker/dockerfile:1.13.0 -``` - -- New `TARGETOSVERSION`, `BUILDOSVERSION` builtin build-args are available for Windows builds, and `TARGETPLATFORM` value now also contains `OSVersion` value. [moby/buildkit#5614](https://github.com/moby/buildkit/pull/5614) -- Allow syntax forwarding for external frontends for files starting with a Byte Order Mark (BOM). [moby/buildkit#5645](https://github.com/moby/buildkit/pull/5645) -- Default `PATH` in Windows Containers has been updated with `powershell.exe` directory. [moby/buildkit#5446](https://github.com/moby/buildkit/pull/5446) -- Fix Dockerfile directive parsing to not allow invalid syntax. [moby/buildkit#5646](https://github.com/moby/buildkit/pull/5646) -- Fix case where `ONBUILD` command may have run twice on inherited stage. [moby/buildkit#5593](https://github.com/moby/buildkit/pull/5593) -- Fix possible missing named context replacement for child stages in Dockerfile. [moby/buildkit#5596](https://github.com/moby/buildkit/pull/5596) - -## 1.13.0 (labs) - -{{< release-date date="2025-01-20" >}} - -{{% include "dockerfile-labs-channel.md" %}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.13.0-labs). - -```dockerfile -# syntax=docker.io/docker/dockerfile-upstream:1.13.0-labs -``` - -- Fix support for non-octal values for `COPY --chmod`. [moby/buildkit#5626](https://github.com/moby/buildkit/pull/5626) - -## 1.12.0 - -{{< release-date date="2024-11-27" >}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.12.0). - -```dockerfile -# syntax=docker/dockerfile:1.12.0 -``` - -- Fix incorrect description in History line of image configuration with multiple `ARG` instructions. 
[moby/buildkit#5508] - -[moby/buildkit#5508]: https://github.com/moby/buildkit/pull/5508 - -## 1.11.1 - -{{< release-date date="2024-11-08" >}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.11.1). - -```dockerfile -# syntax=docker/dockerfile:1.11.1 -``` - -- Fix regression when using the `ONBUILD` instruction in stages inherited within the same Dockerfile. [moby/buildkit#5490] - -[moby/buildkit#5490]: https://github.com/moby/buildkit/pull/5490 - -## 1.11.0 - -{{< release-date date="2024-10-30" >}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.11.0). - -```dockerfile -# syntax=docker/dockerfile:1.11.0 -``` - -- The [`ONBUILD` instruction](/reference/dockerfile.md#onbuild) now supports commands that refer to other stages or images with `from`, such as `COPY --from` or `RUN mount=from=...`. [moby/buildkit#5357] -- The [`SecretsUsedInArgOrEnv`](/reference/build-checks/secrets-used-in-arg-or-env.md) build check has been improved to reduce false positives. [moby/buildkit#5208] -- A new [`InvalidDefinitionDescription`](/reference/build-checks/invalid-definition-description.md) build check recommends formatting comments for build arguments and stages descriptions. This is an [experimental check](/manuals/build/checks.md#experimental-checks). [moby/buildkit#5208], [moby/buildkit#5414] -- Multiple fixes for the `ONBUILD` instruction's progress and error handling. [moby/buildkit#5397] -- Improved error reporting for missing flag errors. [moby/buildkit#5369] -- Enhanced progress output for secret values mounted as environment variables. [moby/buildkit#5336] -- Added built-in build argument `TARGETSTAGE` to expose the name of the (final) target stage for the current build. [moby/buildkit#5431] - -## 1.11.0 (labs) - -{{% include "dockerfile-labs-channel.md" %}} - -- `COPY --chmod` now supports non-octal values. [moby/buildkit#5380] - -[moby/buildkit#5357]: https://github.com/moby/buildkit/pull/5357 -[moby/buildkit#5208]: https://github.com/moby/buildkit/pull/5208 -[moby/buildkit#5414]: https://github.com/moby/buildkit/pull/5414 -[moby/buildkit#5397]: https://github.com/moby/buildkit/pull/5397 -[moby/buildkit#5369]: https://github.com/moby/buildkit/pull/5369 -[moby/buildkit#5336]: https://github.com/moby/buildkit/pull/5336 -[moby/buildkit#5431]: https://github.com/moby/buildkit/pull/5431 -[moby/buildkit#5380]: https://github.com/moby/buildkit/pull/5380 - -## 1.10.0 - -{{< release-date date="2024-09-10" >}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.10.0). - -```dockerfile -# syntax=docker/dockerfile:1.10.0 -``` - -- [Build secrets](/manuals/build/building/secrets.md#target) can now be mounted as environment variables using the `env=VARIABLE` option. [moby/buildkit#5215] -- The [`# check` directive](/reference/dockerfile.md#check) now allows new experimental attribute for enabling experimental validation rules like `CopyIgnoredFile`. [moby/buildkit#5213] -- Improve validation of unsupported modifiers for variable substitution. [moby/buildkit#5146] -- `ADD` and `COPY` instructions now support variable interpolation for build arguments for the `--chmod` option values. [moby/buildkit#5151] -- Improve validation of the `--chmod` option for `COPY` and `ADD` instructions. 
[moby/buildkit#5148] -- Fix missing completions for size and destination attributes on mounts. [moby/buildkit#5245] -- OCI annotations are now set on the Dockerfile frontend release image. [moby/buildkit#5197] - -[moby/buildkit#5215]: https://github.com/moby/buildkit/pull/5215 -[moby/buildkit#5213]: https://github.com/moby/buildkit/pull/5213 -[moby/buildkit#5146]: https://github.com/moby/buildkit/pull/5146 -[moby/buildkit#5151]: https://github.com/moby/buildkit/pull/5151 -[moby/buildkit#5148]: https://github.com/moby/buildkit/pull/5148 -[moby/buildkit#5245]: https://github.com/moby/buildkit/pull/5245 -[moby/buildkit#5197]: https://github.com/moby/buildkit/pull/5197 - -## 1.9.0 - -{{< release-date date="2024-07-11" >}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.9.0). - -```dockerfile -# syntax=docker/dockerfile:1.9.0 -``` - -- Add new validation rules: - - `SecretsUsedInArgOrEnv` - - `InvalidDefaultArgInFrom` - - `RedundantTargetPlatform` - - `CopyIgnoredFile` (experimental) - - `FromPlatformFlagConstDisallowed` -- Many performance improvements for working with big Dockerfiles. [moby/buildkit#5067](https://github.com/moby/buildkit/pull/5067/), [moby/buildkit#5029](https://github.com/moby/buildkit/pull/5029/) -- Fix possible panic when building a Dockerfile without defined stages. [moby/buildkit#5150](https://github.com/moby/buildkit/pull/5150/) -- Fix incorrect JSON parsing that could cause some incorrect JSON values to pass without producing an error. [moby/buildkit#5107](https://github.com/moby/buildkit/pull/5107/) -- Fix a regression where `COPY --link` with a destination path of `.` could fail. [moby/buildkit#5080](https://github.com/moby/buildkit/pull/5080/) -- Fix validation of `ADD --checksum` when used with a Git URL. [moby/buildkit#5085](https://github.com/moby/buildkit/pull/5085/) - -## 1.8.1 - -{{< release-date date="2024-06-18" >}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.8.1). - -```dockerfile -# syntax=docker/dockerfile:1.8.1 -``` - -### Bug fixes and enhancements - -- Fix handling of empty strings on variable expansion. [moby/buildkit#5052](https://github.com/moby/buildkit/pull/5052/) -- Improve formatting of build warnings. [moby/buildkit#5037](https://github.com/moby/buildkit/pull/5037/), [moby/buildkit#5045](https://github.com/moby/buildkit/pull/5045/), [moby/buildkit#5046](https://github.com/moby/buildkit/pull/5046/) -- Fix possible invalid output for the `UndeclaredVariable` warning in multi-stage builds. [moby/buildkit#5048](https://github.com/moby/buildkit/pull/5048/) - -## 1.8.0 - -{{< release-date date="2024-06-11" >}} - -The full release note for this release is available -[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.8.0). - -```dockerfile -# syntax=docker/dockerfile:1.8.0 -``` - -- Many new validation rules have been added to verify that your Dockerfile follows best practices. These rules are validated during the build, and a new `check` frontend method can be used to trigger validation only, without completing the whole build. -- The new `#check` directive and the `BUILDKIT_DOCKERFILE_CHECK` build argument let you control the behavior of build checks. [moby/buildkit#4962](https://github.com/moby/buildkit/pull/4962/) -- Using a single-platform base image that does not match your expected platform is now validated. 
[moby/buildkit#4924](https://github.com/moby/buildkit/pull/4924/) -- Errors from the expansion of `ARG` definitions in global scope are now handled properly. [moby/buildkit#4856](https://github.com/moby/buildkit/pull/4856/) -- Expansion of the default value of `ARG` now only happens if it is not overwritten by the user. Previously, expansion was completed and the value was later ignored, which could result in an unexpected expansion error. [moby/buildkit#4856](https://github.com/moby/buildkit/pull/4856/) -- Performance of parsing huge Dockerfiles with many stages has been improved. [moby/buildkit#4970](https://github.com/moby/buildkit/pull/4970/) -- Fix some Windows path handling consistency errors. [moby/buildkit#4825](https://github.com/moby/buildkit/pull/4825/) - -## 1.7.0 - -{{< release-date date="2024-03-06" >}} - -### Stable - -```dockerfile -# syntax=docker/dockerfile:1.7 -``` - -- Variable expansion now allows string substitutions and trimming. - [moby/buildkit#4427](https://github.com/moby/buildkit/pull/4427), - [moby/buildkit#4287](https://github.com/moby/buildkit/pull/4287) -- Named contexts with local sources now correctly transfer only the files used in the Dockerfile instead of the full source directory. - [moby/buildkit#4161](https://github.com/moby/buildkit/pull/4161) -- Dockerfile now better validates the order of stages and returns clear errors with stack traces if stages are in an incorrect order. - [moby/buildkit#4568](https://github.com/moby/buildkit/pull/4568), - [moby/buildkit#4567](https://github.com/moby/buildkit/pull/4567) -- History commit messages now contain flags used with `COPY` and `ADD`. - [moby/buildkit#4597](https://github.com/moby/buildkit/pull/4597) -- Progress messages for `ADD` commands from Git and HTTP sources have been improved. - [moby/buildkit#4408](https://github.com/moby/buildkit/pull/4408) - -### Labs - -```dockerfile -# syntax=docker/dockerfile:1.7-labs -``` - -- A new `--parents` flag has been added to `COPY` for copying files while keeping the parent directory structure. - [moby/buildkit#4598](https://github.com/moby/buildkit/pull/4598), - [moby/buildkit#3001](https://github.com/moby/buildkit/pull/3001), - [moby/buildkit#4720](https://github.com/moby/buildkit/pull/4720), - [moby/buildkit#4728](https://github.com/moby/buildkit/pull/4728), - [docs](/reference/dockerfile.md#copy---parents) -- A new `--exclude` flag can be used in `COPY` and `ADD` commands to apply a filter to copied files. - [moby/buildkit#4561](https://github.com/moby/buildkit/pull/4561), - [docs](/reference/dockerfile.md#copy---exclude) - -## 1.6.0 - -{{< release-date date="2023-06-13" >}} - -### New - -- Add the `--start-interval` flag to the - [`HEALTHCHECK` instruction](/reference/dockerfile.md#healthcheck). - -The following features have graduated from the labs channel to stable: - -- The `ADD` instruction can now [import files directly from Git URLs](/reference/dockerfile.md#adding-a-git-repository-add-git-ref-dir) -- The `ADD` instruction now supports the [`--checksum` flag](/reference/dockerfile.md#verifying-a-remote-file-checksum-add---checksumchecksum-http-src-dest) - to validate the contents of the remote file - -### Bug fixes and enhancements - -- Variable substitution now supports additional POSIX compatible variants without `:`. - [moby/buildkit#3611](https://github.com/moby/buildkit/pull/3611) -- Exported Windows images now contain OSVersion and OSFeatures values from the base image. 
- [moby/buildkit#3619](https://github.com/moby/buildkit/pull/3619) -- Changed the permissions for Heredocs to 0644. - [moby/buildkit#3992](https://github.com/moby/buildkit/pull/3992) - -## 1.5.2 - -{{< release-date date="2023-02-14" >}} - -### Bug fixes and enhancements - -- Fix building from a Git reference that is missing a branch name but contains a - subdir -- The 386 platform image is now included in the release - -## 1.5.1 - -{{< release-date date="2023-01-18" >}} - -### Bug fixes and enhancements - -- Fix possible panic when warning conditions appear in multi-platform builds - -## 1.5.0 (labs) - -{{< release-date date="2023-01-10" >}} - -{{% include "dockerfile-labs-channel.md" %}} - -### New - -- The `ADD` command now supports the [`--checksum` flag](/reference/dockerfile.md#verifying-a-remote-file-checksum-add---checksumchecksum-http-src-dest) - to validate the contents of the remote file - -## 1.5.0 - -{{< release-date date="2023-01-10" >}} - -### New - -- The `ADD` command can now [import files directly from Git URLs](/reference/dockerfile.md#adding-a-git-repository-add-git-ref-dir) - -### Bug fixes and enhancements - -- Named contexts now support the `oci-layout://` protocol for including images from - a local OCI layout structure -- Dockerfile now supports secondary requests for listing all build targets or - printing an outline of accepted parameters for a specific build target -- The Dockerfile `#syntax` directive that redirects to an external frontend image - now allows the directive to also be set with `//` comments or JSON. The file - may also contain a shebang header -- A named context can now be initialized with an empty scratch image -- Named contexts can now be initialized with an SSH Git URL -- Fix handling of `ONBUILD` when importing Schema1 images - -## 1.4.3 - -{{< release-date date="2022-08-23" >}} - -### Bug fixes and enhancements - -- Fix the creation timestamp not getting reset when building an image from - a `docker-image://` named context -- Fix passing the `--platform` flag of the `FROM` command when loading - a `docker-image://` named context - -## 1.4.2 - -{{< release-date date="2022-05-06" >}} - -### Bug fixes and enhancements - -- Fix loading certain environment variables from an image passed with built - context - -## 1.4.1 - -{{< release-date date="2022-04-08" >}} - -### Bug fixes and enhancements - -- Fix named context resolution in cross-compilation cases when the input - is built for a different platform - -## 1.4.0 - -{{< release-date date="2022-03-09" >}} - -### New - -- [`COPY --link` and `ADD --link`](/reference/dockerfile.md#copy---link) - allow copying files with increased cache efficiency and rebasing images without - requiring them to be rebuilt. `--link` copies files to a separate layer and - then uses the new LLB MergeOp implementation to chain independent layers together -- [Heredocs](/reference/dockerfile.md#here-documents) support has - been promoted from the labs channel to stable. This feature allows writing - multiline inline scripts and files -- Additional [named build contexts](/reference/cli/docker/buildx/build.md#build-context) - can be passed to the build to add or overwrite a stage or an image inside the - build. 
A source for the context can be a local source, image, Git, or HTTP URL -- The [`BUILDKIT_SANDBOX_HOSTNAME` build-arg](/reference/dockerfile.md#buildkit-built-in-build-args) - can be used to set the default hostname for the `RUN` steps - -### Bug fixes and enhancements - -- When using a cross-compilation stage, the target platform for a step is now - shown in the progress output -- Fix some cases where Heredocs incorrectly removed quotes from content - -## 1.3.1 - -{{< release-date date="2021-10-04" >}} - -### Bug fixes and enhancements - -- Fix parsing of the "required" mount key without a value - -## 1.3.0 (labs) - -{{< release-date date="2021-07-16" >}} - -{{% include "dockerfile-labs-channel.md" %}} - -### New - -- The `RUN` and `COPY` commands now support [Here-document syntax](/reference/dockerfile.md#here-documents) - for writing multiline inline scripts and files - -## 1.3.0 - -{{< release-date date="2021-07-16" >}} - -### New - -- The `RUN` command allows the [`--network` flag](/reference/dockerfile.md#run---network) - for requesting a specific type of network for the step. `--network=host` - requires allowing the `network.host` entitlement. This feature was previously - only available on the labs channel - -### Bug fixes and enhancements - -- The `ADD` command with a remote URL input now correctly handles the `--chmod` flag -- Values for the [`RUN --mount` flag](/reference/dockerfile.md#run---mount) - now support variable expansion, except for the `from` field -- Allow the [`BUILDKIT_MULTI_PLATFORM` build arg](/reference/dockerfile.md#buildkit-built-in-build-args) - to force always creating a multi-platform image, even if it only contains a single - platform - -## 1.2.1 (labs) - -{{< release-date date="2020-12-12" >}} - -{{% include "dockerfile-labs-channel.md" %}} - -### Bug fixes and enhancements - -- The `RUN` command allows the [`--network` flag](/reference/dockerfile.md#run---network) - for requesting a specific type of network for the step. 
`--network=host` - requires allowing the `network.host` entitlement - -## 1.2.1 - -{{< release-date date="2020-12-12" >}} - -### Bug fixes and enhancements - -- Revert "Ensure ENTRYPOINT command has at least one argument" -- Optimize processing of `COPY` calls in multi-platform cross-compilation builds - -## 1.2.0 (labs) - -{{< release-date date="2020-12-03" >}} - -{{% include "dockerfile-labs-channel.md" %}} - -### Bug fixes and enhancements - -- The experimental channel has been renamed to _labs_ - -## 1.2.0 - -{{< release-date date="2020-12-03" >}} - -### New - -- [`RUN --mount` syntax](/reference/dockerfile.md#run---mount) for - creating secret, ssh, bind, and cache mounts has been moved to the mainline - channel -- The [`ARG` command](/reference/dockerfile.md#arg) now supports defining - multiple build args on the same line, similar to `ENV` - -### Bug fixes and enhancements - -- Metadata load errors are now handled as fatal to avoid incorrect build results -- Allow a lowercase Dockerfile name -- The `--chown` flag in `ADD` now allows parameter expansion -- `ENTRYPOINT` requires at least one argument to avoid creating broken images - -## 1.1.7 - -{{< release-date date="2020-04-18" >}} - -### Bug fixes and enhancements - -- Forward `FrontendInputs` to the gateway - -## 1.1.2 (labs) - -{{< release-date date="2019-07-31" >}} - -{{% include "dockerfile-labs-channel.md" %}} - -### Bug fixes and enhancements - -- Allow setting the security mode for a process with `RUN --security=sandbox|insecure` -- Allow setting uid/gid for [cache mounts](/reference/dockerfile.md#run---mounttypecache) -- Avoid requesting internally linked paths to be pulled to the build context -- Ensure missing cache IDs default to target paths -- Allow setting a namespace for cache mounts with the [`BUILDKIT_CACHE_MOUNT_NS` build arg](/reference/dockerfile.md#buildkit-built-in-build-args) - -## 1.1.2 - -{{< release-date date="2019-07-31" >}} - -### Bug fixes and enhancements - -- Fix workdir creation to use the correct user, and don't reset custom ownership -- Fix handling of empty build args that are also used as `ENV` -- Detect circular dependencies - -## 1.1.0 - -{{< release-date date="2019-04-27" >}} - -### New - -- `ADD/COPY` commands now support an implementation based on `llb.FileOp` and do - not require a helper image if built-in file operations support is available -- The `--chown` flag for the `COPY` command now supports variable expansion - -### Bug fixes and enhancements - -- To find the files ignored from the build context, the Dockerfile frontend will - first look for a `<path/to/Dockerfile>.dockerignore` file and, if it is not - found, the `.dockerignore` file will be looked up from the root of the build - context. This allows projects with multiple Dockerfiles to use different - `.dockerignore` definitions diff --git a/content/manuals/build/cache/backends/_index.md b/content/manuals/build/cache/backends/_index.md index 3606910a44d..aac96afbdd0 100644 --- a/content/manuals/build/cache/backends/_index.md +++ b/content/manuals/build/cache/backends/_index.md @@ -25,7 +25,7 @@ Other cache backends require you to select a different [driver](/manuals/build/b > > If you use secrets or credentials inside your build process, ensure you > manipulate them using the dedicated -> [`--secret` option](/reference/cli/docker/buildx/build.md#secret). +> [`--secret` option](/reference/cli/docker/buildx/build/#secret). > Manually managing secrets using `COPY` or `ARG` could result in leaked > credentials. 
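For illustration, a minimal sketch of the recommended approach, passing the secret through a secret mount rather than `COPY` or `ARG` (the secret ID, file name, and CLI tool below are placeholders):

```dockerfile
# syntax=docker/dockerfile:1
FROM alpine
# The secret is mounted at /run/secrets/api_token for this RUN step only
# and is never written into an image layer or the build cache.
RUN --mount=type=secret,id=api_token \
    some-cli login --token-file /run/secrets/api_token
```

```console
$ docker buildx build --secret id=api_token,src=./api_token.txt .
```

Here `some-cli` and `api_token.txt` stand in for whatever tool and local secret file your build actually uses.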
@@ -56,9 +56,9 @@ Buildx supports the following cache storage backends: ## Command syntax To use any of the cache backends, you first need to specify it on build with the -[`--cache-to` option](/reference/cli/docker/buildx/build.md#cache-to) +[`--cache-to` option](/reference/cli/docker/buildx/build/#cache-to) to export the cache to your storage backend of choice. Then, use the -[`--cache-from` option](/reference/cli/docker/buildx/build.md#cache-from) +[`--cache-from` option](/reference/cli/docker/buildx/build/#cache-from) to import the cache from the storage backend into the current build. Unlike the local BuildKit cache (which is always enabled), all of the cache storage backends must be explicitly exported to, and explicitly imported from. diff --git a/content/manuals/build/cache/backends/gha.md b/content/manuals/build/cache/backends/gha.md index 9b3f5c1040d..81b23999598 100644 --- a/content/manuals/build/cache/backends/gha.md +++ b/content/manuals/build/cache/backends/gha.md @@ -30,21 +30,23 @@ $ docker buildx build --push -t / \ The following table describes the available CSV parameters that you can pass to `--cache-to` and `--cache-from`. -| Name | Option | Type | Default | Description | -|----------------|-------------------------|-------------|--------------------------|----------------------------------------------------------------------| -| `url` | `cache-to`,`cache-from` | String | `$ACTIONS_CACHE_URL` | Cache server URL, see [authentication][1]. | -| `url_v2` | `cache-to`,`cache-from` | String | `$ACTIONS_CACHE_URL` | Cache v2 server URL, see [authentication][1]. | -| `token` | `cache-to`,`cache-from` | String | `$ACTIONS_RUNTIME_TOKEN` | Access token, see [authentication][1]. | -| `scope` | `cache-to`,`cache-from` | String | `buildkit` | Which scope cache object belongs to, see [scope][2] | -| `mode` | `cache-to` | `min`,`max` | `min` | Cache layers to export, see [cache mode][3]. | -| `ignore-error` | `cache-to` | Boolean | `false` | Ignore errors caused by failed cache exports. | -| `timeout` | `cache-to`,`cache-from` | String | `10m` | Max duration for importing or exporting cache before it's timed out. | -| `repository` | `cache-to` | String | | GitHub repository used for cache storage. | -| `ghtoken` | `cache-to` | String | | GitHub token required for accessing the GitHub API. | +| Name | Option | Type | Default | Description | +|----------------|-------------------------|-------------|------------------------------------------------|----------------------------------------------------------------------| +| `url` | `cache-to`,`cache-from` | String | `$ACTIONS_CACHE_URL` or `$ACTIONS_RESULTS_URL` | Cache server URL, see [authentication][1]. Ignored when `version=2`. | +| `url_v2` | `cache-to`,`cache-from` | String | `$ACTIONS_RESULTS_URL` | Cache v2 server URL, see [authentication][1]. | +| `token` | `cache-to`,`cache-from` | String | `$ACTIONS_RUNTIME_TOKEN` | Access token, see [authentication][1]. | +| `scope` | `cache-to`,`cache-from` | String | `buildkit` | Which scope cache object belongs to, see [scope][2] | +| `mode` | `cache-to` | `min`,`max` | `min` | Cache layers to export, see [cache mode][3]. | +| `ignore-error` | `cache-to` | Boolean | `false` | Ignore errors caused by failed cache exports. | +| `timeout` | `cache-to`,`cache-from` | String | `10m` | Max duration for importing or exporting cache before it's timed out. | +| `repository` | `cache-to` | String | | GitHub repository used for cache storage. 
| +| `ghtoken` | `cache-to` | String | | GitHub token required for accessing the GitHub API. | +| `version` | `cache-to`,`cache-from` | String | `1` unless `$ACTIONS_CACHE_SERVICE_V2` is set, then `2` | Selects GitHub Actions cache version, see [version][4] | [1]: #authentication [2]: #scope [3]: _index.md#cache-mode +[4]: #version ## Authentication @@ -78,6 +80,15 @@ GitHub's [cache access restrictions](https://docs.github.com/en/actions/advanced still apply. Only the cache for the current branch, the base branch and the default branch is accessible by a workflow. +## Version + +If you don’t set `version` explicitly, the default is v1. However, if the environment variable `$ACTIONS_CACHE_SERVICE_V2` is set to a value interpreted as `true` ( `1`, `true`, `yes`), then v2 is used automatically. + +Only one URL is relevant at a time: + + - With v1, use `url` (defaults to `$ACTIONS_CACHE_URL`). + - With v2, use `url_v2` (defaults to `$ACTIONS_RESULTS_URL`). + ### Using `docker/build-push-action` When using the @@ -89,7 +100,7 @@ For example: ```yaml - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: context: . push: true @@ -138,7 +149,7 @@ action. You can also set the `ghtoken` parameter manually using the ```yaml - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: context: . push: true diff --git a/content/manuals/build/cache/backends/s3.md b/content/manuals/build/cache/backends/s3.md index abfbfe80c20..99831816ca3 100644 --- a/content/manuals/build/cache/backends/s3.md +++ b/content/manuals/build/cache/backends/s3.md @@ -27,22 +27,23 @@ $ docker buildx build --push -t / \ The following table describes the available CSV parameters that you can pass to `--cache-to` and `--cache-from`. -| Name | Option | Type | Default | Description | -| -------------------- | ----------------------- | ----------- | ------- | -------------------------------------------------------------- | -| `region` | `cache-to`,`cache-from` | String | | Required. Geographic location. | -| `bucket` | `cache-to`,`cache-from` | String | | Required. Name of the S3 bucket. | -| `name` | `cache-to`,`cache-from` | String | | Name of the cache image. | -| `endpoint_url` | `cache-to`,`cache-from` | String | | Endpoint of the S3 bucket. | -| `blobs_prefix` | `cache-to`,`cache-from` | String | | Prefix to prepend to blob filenames. | -| `upload_parallelism` | `cache-to` | Integer | `4` | Number of parallel layer uploads. | -| `touch_refresh` | `cache-to` | Time | `24h` | Interval for updating the timestamp of unchanged cache layers. | -| `manifests_prefix` | `cache-to`,`cache-from` | String | | Prefix to prepend on manifest filenames. | -| `use_path_style` | `cache-to`,`cache-from` | Boolean | `false` | When `true`, uses `bucket` in the URL instead of hostname. | -| `access_key_id` | `cache-to`,`cache-from` | String | | See [authentication][1]. | -| `secret_access_key` | `cache-to`,`cache-from` | String | | See [authentication][1]. | -| `session_token` | `cache-to`,`cache-from` | String | | See [authentication][1]. | -| `mode` | `cache-to` | `min`,`max` | `min` | Cache layers to export, see [cache mode][2]. | -| `ignore-error` | `cache-to` | Boolean | `false` | Ignore errors caused by failed cache exports. 
| +| Name | Option | Type | Default | Description | +|----------------------| ----------------------- | ----------- |--------------|----------------------------------------------------------------| +| `region` | `cache-to`,`cache-from` | String | | Required. Geographic location. | +| `bucket` | `cache-to`,`cache-from` | String | | Required. Name of the S3 bucket. | +| `name` | `cache-to`,`cache-from` | String | `buildkit` | Name of the cache image. | +| `endpoint_url` | `cache-to`,`cache-from` | String | | Endpoint of the S3 bucket. | +| `prefix` | `cache-to`,`cache-from` | String | | Prefix to prepend to all filenames. | +| `blobs_prefix` | `cache-to`,`cache-from` | String | `blobs/` | Prefix to prepend to blob filenames. | +| `upload_parallelism` | `cache-to` | Integer | `4` | Number of parallel layer uploads. | +| `touch_refresh` | `cache-to` | Time | `24h` | Interval for updating the timestamp of unchanged cache layers. | +| `manifests_prefix` | `cache-to`,`cache-from` | String | `manifests/` | Prefix to prepend to manifest filenames. | +| `use_path_style` | `cache-to`,`cache-from` | Boolean | `false` | When `true`, uses `bucket` in the URL instead of hostname. | +| `access_key_id` | `cache-to`,`cache-from` | String | | See [authentication][1]. | +| `secret_access_key` | `cache-to`,`cache-from` | String | | See [authentication][1]. | +| `session_token` | `cache-to`,`cache-from` | String | | See [authentication][1]. | +| `mode` | `cache-to` | `min`,`max` | `min` | Cache layers to export, see [cache mode][2]. | +| `ignore-error` | `cache-to` | Boolean | `false` | Ignore errors caused by failed cache exports. | [1]: #authentication [2]: _index.md#cache-mode @@ -57,7 +58,7 @@ Alternatively, you can use the `access_key_id`, `secret_access_key`, and Refer to [AWS Go SDK, Specifying Credentials][3] for details about authentication using environment variables and credentials file. -[3]: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials +[3]: https://docs.aws.amazon.com/sdk-for-go/v2/developer-guide/configure-gosdk.html#specifying-credentials ## Further reading diff --git a/content/manuals/build/cache/garbage-collection.md b/content/manuals/build/cache/garbage-collection.md index 9e35068d24c..7609bab11a0 100644 --- a/content/manuals/build/cache/garbage-collection.md +++ b/content/manuals/build/cache/garbage-collection.md @@ -6,8 +6,8 @@ aliases: - /build/building/cache/garbage-collection/ --- -While [`docker builder prune`](/reference/cli/docker/builder/prune.md) -or [`docker buildx prune`](/reference/cli/docker/buildx/prune.md) +While [`docker builder prune`](/reference/cli/docker/builder/prune/) +or [`docker buildx prune`](/reference/cli/docker/buildx/prune/) commands run at once, Garbage Collection (GC) runs periodically and follows an ordered list of prune policies. The BuildKit daemon clears the build cache when the cache size becomes too big, or when the cache age expires. 
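For comparison, here are a couple of illustrative on-demand prune invocations; the filter and scope are examples, so adjust them to your environment:

```console
# Immediately remove build cache entries that haven't been used for 48 hours
$ docker builder prune --filter until=48h

# Remove all build cache for the current builder, including internal/frontend images
$ docker buildx prune --all
```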
@@ -119,15 +119,15 @@ default GC policies resolve to: "enabled": true, "policy": [ { - "keepStorage": "2.764GB", + "reservedSpace": "2.764GB", + "keepDuration": "48h", "filter": [ - "unused-for=48h", - "type==source.local,type==exec.cachemount,type==source.git.checkout" + "type=source.local,type=exec.cachemount,type=source.git.checkout" ] }, - { "keepStorage": "20GB", "filter": ["unused-for=1440h"] }, - { "keepStorage": "20GB" }, - { "keepStorage": "20GB", "all": true } + { "reservedSpace": "20GB", "keepDuration": ["1440h"] }, + { "reservedSpace": "20GB" }, + { "reservedSpace": "20GB", "all": true } ] } } @@ -140,6 +140,8 @@ is to adjust the `defaultKeepStorage` option: - Increase the limit if you feel like you think the GC is too aggressive. - Decrease the limit if you need to preserve space. +#### Custom GC policies in the Docker daemon configuration file + If you need even more control, you can define your own GC policies directly. The following example defines a more conservative GC configuration with the following policies: @@ -153,19 +155,30 @@ following policies: "builder": { "gc": { "enabled": true, - "defaultKeepStorage": "50GB", "policy": [ - { "keepStorage": "0", "filter": ["unused-for=1440h"] }, - { "keepStorage": "0" }, - { "keepStorage": "100GB", "all": true } + { "reservedSpace": "50GB", "keepDuration": ["1440h"] }, + { "reservedSpace": "50GB" }, + { "reservedSpace": "100GB", "all": true } ] } } } ``` -Policies 1 and 2 here set `keepStorage` to `0`, which means they'll fall back -to the default limit of 50GB as defined by `defaultKeepStorage`. +> [!NOTE] +> In the Docker daemon configuration file, the "equals" operator in GC filters +> is denoted using a single `=`, whereas BuildKit's configuration file uses +> `==`: +> +> | `daemon.json` | `buildkitd.toml` | +> |---------------------|----------------------| +> | `type=source.local` | `type==source.local` | +> | `private=true` | `private==true` | +> | `shared=true` | `shared==true` | +> +> See [prune filters](/reference/cli/docker/buildx/prune/#filter) for +> information about available GC filters. GC configuration in `daemon.json` +> supports all filters except `mutable` and `immutable`. ### BuildKit configuration file @@ -288,3 +301,6 @@ when defining a GC policy you have two additional configuration options: pruned. - `filters`: Filters let you specify specific types of cache records that a GC policy is allowed to prune. + +See [buildx prune filters](/reference/cli/docker/buildx/prune/#filter) for +information about available GC filters. diff --git a/content/manuals/build/cache/invalidation.md b/content/manuals/build/cache/invalidation.md index 535f3df2816..4bf3ee7d327 100644 --- a/content/manuals/build/cache/invalidation.md +++ b/content/manuals/build/cache/invalidation.md @@ -44,6 +44,23 @@ If your build contains several layers and you want to ensure the build cache is reusable, order the instructions from less frequently changed to more frequently changed where possible. +## WORKDIR and SOURCE_DATE_EPOCH + +The `WORKDIR` instruction respects the `SOURCE_DATE_EPOCH` build argument when +determining cache validity. Changing `SOURCE_DATE_EPOCH` between builds +invalidates the cache for `WORKDIR` and all subsequent instructions. + +`SOURCE_DATE_EPOCH` sets timestamps for files created during the build. If you +set this to a dynamic value like a Git commit timestamp, the cache breaks with +each commit. This is expected behavior when tracking build provenance. 
+ +For reproducible builds without frequent cache invalidation, use a fixed +timestamp: + +```console +$ docker build --build-arg SOURCE_DATE_EPOCH=0 . +``` + ## RUN instructions The cache for `RUN` instructions isn't invalidated automatically between builds. @@ -60,7 +77,7 @@ To force a re-execution of the `RUN` instruction, you can: - Make sure that a layer before it has changed - Clear the build cache ahead of the build using - [`docker builder prune`](/reference/cli/docker/builder/prune.md) + [`docker builder prune`](/reference/cli/docker/builder/prune/) - Use the `--no-cache` or `--no-cache-filter` options The `--no-cache-filter` option lets you specify a specific build stage to diff --git a/content/manuals/build/cache/optimize.md b/content/manuals/build/cache/optimize.md index 3e0dfed9f78..7f9fe9c0aba 100644 --- a/content/manuals/build/cache/optimize.md +++ b/content/manuals/build/cache/optimize.md @@ -116,13 +116,16 @@ instruction in your Dockerfile: ```dockerfile FROM golang:latest -WORKDIR /app +WORKDIR /build RUN --mount=type=bind,target=. go build -o /app/hello ``` -In this example, the current directory is mounted into the build container -before the `go build` command gets executed. The source code is available in -the build container for the duration of that `RUN` instruction. When the +In this example, the current directory is mounted into the build container at +`/build` before the `go build` command gets executed. The build output is +written to `/app/hello`, which is outside the mount point. This distinction is +important: the build output must be written outside the bind mount target, +since the mount is read-only by default. The source code is available in the +build container for the duration of that `RUN` instruction. When the instruction is done executing, the mounted files are not persisted in the final image, or in the build cache. Only the output of the `go build` command remains. @@ -225,6 +228,7 @@ tool you're using. Here are a few examples: ```dockerfile RUN --mount=type=cache,target=/go/pkg/mod \ + --mount=type=cache,target=/root/.cache/go-build \ go build -o /app/hello ``` @@ -324,16 +328,16 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: push: true tags: user/app:latest diff --git a/content/manuals/build/checks.md b/content/manuals/build/checks.md index afbf8239266..a6aa1f18828 100644 --- a/content/manuals/build/checks.md +++ b/content/manuals/build/checks.md @@ -1,12 +1,7 @@ --- title: Checking your build configuration linkTitle: Build checks -params: - sidebar: - badge: - color: green - text: New -weight: 30 +weight: 20 description: Learn how to use build checks to validate your build configuration. keywords: build, buildx, buildkit, checks, validate, configuration, lint --- @@ -38,8 +33,8 @@ Build checks are useful for: > [!TIP] > -> Want a better editing experience for Dockerfiles in VS Code? -> Check out the [Docker VS Code Extension (Beta)](https://marketplace.visualstudio.com/items?itemName=docker.docker) for linting, code navigation, and vulnerability scanning. 
+> To improve linting, code navigation, and vulnerability scanning of your Dockerfiles in Visual Studio Code +> see the [Docker DX](https://marketplace.visualstudio.com/items?itemName=docker.docker) extension. ## Build with checks @@ -79,7 +74,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Build and push - uses: docker/build-push-action@v6.6.0 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} ``` ![GitHub Actions build check annotations](./images/gha-check-annotations.png) diff --git a/content/manuals/build/ci/github-actions/_index.md b/content/manuals/build/ci/github-actions/_index.md index 4f80a110be5..68968242d5c 100644 --- a/content/manuals/build/ci/github-actions/_index.md +++ b/content/manuals/build/ci/github-actions/_index.md @@ -29,7 +29,7 @@ The following GitHub Actions are available: - [Docker Setup Compose](https://github.com/marketplace/actions/docker-setup-compose): installs and sets up [Compose](../../../compose). - [Docker Setup Docker](https://github.com/marketplace/actions/docker-setup-docker): - installs Docker CE. + installs Docker Engine. - [Docker Setup QEMU](https://github.com/marketplace/actions/docker-setup-qemu): installs [QEMU](https://github.com/qemu/qemu) static binaries for multi-platform builds. diff --git a/content/manuals/build/ci/github-actions/annotations.md b/content/manuals/build/ci/github-actions/annotations.md index 5491d671b9d..6faee4ab38d 100644 --- a/content/manuals/build/ci/github-actions/annotations.md +++ b/content/manuals/build/ci/github-actions/annotations.md @@ -34,22 +34,22 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Extract metadata id: meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@{{% param "metadata_action_version" %}} with: images: ${{ env.IMAGE_NAME }} - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: tags: ${{ steps.meta.outputs.tags }} annotations: ${{ steps.meta.outputs.annotations }} @@ -73,22 +73,22 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Extract metadata id: meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@{{% param "metadata_action_version" %}} with: images: ${{ env.IMAGE_NAME }} - name: Build - uses: docker/bake-action@v6 + uses: docker/bake-action@{{% param "bake_action_version" %}} with: files: | ./docker-bake.hcl @@ -126,24 +126,24 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Extract metadata id: meta - 
uses: docker/metadata-action@v5 + uses: docker/metadata-action@{{% param "metadata_action_version" %}} with: images: ${{ env.IMAGE_NAME }} env: DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: tags: ${{ steps.meta.outputs.tags }} annotations: ${{ steps.meta.outputs.annotations }} diff --git a/content/manuals/build/ci/github-actions/attestations.md b/content/manuals/build/ci/github-actions/attestations.md index eb99baf0d2d..75a1c822b72 100644 --- a/content/manuals/build/ci/github-actions/attestations.md +++ b/content/manuals/build/ci/github-actions/attestations.md @@ -34,7 +34,7 @@ attestations to your image, with the following conditions: > arguments to pass secrets to your build, such as user credentials or > authentication tokens, those secrets are exposed in the provenance > attestation. Refactor your build to pass those secrets using -> [secret mounts](/reference/cli/docker/buildx/build.md#secret) +> [secret mounts](/reference/cli/docker/buildx/build/#secret) > instead. Also remember to rotate any secrets you may have exposed. ## Max-level provenance @@ -63,22 +63,22 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Extract metadata id: meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@{{% param "metadata_action_version" %}} with: images: ${{ env.IMAGE_NAME }} - name: Build and push image - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: push: true provenance: mode=max @@ -109,22 +109,22 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Extract metadata id: meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@{{% param "metadata_action_version" %}} with: images: ${{ env.IMAGE_NAME }} - name: Build and push image - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: sbom: true push: true diff --git a/content/manuals/build/ci/github-actions/build-summary.md b/content/manuals/build/ci/github-actions/build-summary.md index 9472ead8e68..6bf130667c6 100644 --- a/content/manuals/build/ci/github-actions/build-summary.md +++ b/content/manuals/build/ci/github-actions/build-summary.md @@ -19,8 +19,8 @@ versions of the [Build and push Docker images](https://github.com/marketplace/ac or [Docker Buildx Bake](https://github.com/marketplace/actions/docker-buildx-bake) GitHub Actions: -- `docker/build-push-action@v6` -- `docker/bake-action@v6` +- `docker/build-push-action@{{% param "build_push_action_version" %}}` +- `docker/bake-action@{{% param "bake_action_version" %}}` To view the job summary, open the details page for the job in GitHub after the job has finished. 
The summary is available for both failed and successful @@ -31,8 +31,6 @@ message that caused the build to fail: ## Import build records to Docker Desktop -{{< summary-bar feature_name="Import builds" >}} - The job summary includes a link for downloading a build record archive for the run. The build record archive is a ZIP file containing the details about a build (or builds, if you use `docker/bake-action` to build multiple targets). You can @@ -67,7 +65,7 @@ in the YAML configuration for your build step: ```yaml {hl_lines=4} - name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} env: DOCKER_BUILD_SUMMARY: false with: @@ -83,7 +81,7 @@ your build step: ```yaml {hl_lines=4} - name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} env: DOCKER_BUILD_RECORD_UPLOAD: false with: @@ -98,7 +96,5 @@ contain a link to download the build record archive. Build summaries are currently not supported for: -- Builds using [Docker Build Cloud](/manuals/build-cloud/_index.md). Support for Docker - Build Cloud is planned for a future release. - Repositories hosted on GitHub Enterprise Servers. Summaries can only be viewed for repositories hosted on GitHub.com. diff --git a/content/manuals/build/ci/github-actions/cache.md b/content/manuals/build/ci/github-actions/cache.md index 5626447e1e4..ff3ed02f07b 100644 --- a/content/manuals/build/ci/github-actions/cache.md +++ b/content/manuals/build/ci/github-actions/cache.md @@ -30,16 +30,16 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} + - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: push: true tags: user/app:latest @@ -63,16 +63,16 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - + - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} + - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: push: true tags: user/app:latest @@ -104,16 +104,16 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - + - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} + - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: push: true tags: user/app:latest @@ -123,14 +123,14 @@ jobs: > [!IMPORTANT] > -> Starting [April 15th, 2025, only GitHub Cache service API v2 will be supported](https://gh.io/gha-cache-sunset). 
-> +> As of April 15th, 2025, [only GitHub Cache service API v2 is supported.](https://gh.io/gha-cache-sunset). The legacy v1 API has been shut down. +> > If you encounter the following error during your build: -> +> > ```console > ERROR: failed to solve: This legacy service is shutting down, effective April 15, 2025. Migrate to the new service ASAP. For more information: https://gh.io/gha-cache-sunset > ``` -> +> > You're probably using outdated tools that only support the legacy GitHub > Cache service API v1. Here are the minimum versions you need to upgrade to > depending on your use case: @@ -138,37 +138,37 @@ jobs: > * BuildKit >= v0.20.0 > * Docker Compose >= v2.33.1 > * Docker Engine >= v28.0.0 (if you're building using the Docker driver with containerd image store enabled) -> +> > If you're building using the `docker/build-push-action` or `docker/bake-action` > actions on GitHub hosted runners, Docker Buildx and BuildKit are already up > to date but on self-hosted runners, you may need to update them yourself. > Alternatively, you can use the `docker/setup-buildx-action` action to install > the latest version of Docker Buildx: -> +> > ```yaml > - name: Set up Docker Buildx -> uses: docker/setup-buildx-action@v3 +> uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} > with: > version: latest > ``` -> +> > If you're building using Docker Compose, you can use the > `docker/setup-compose-action` action: -> +> > ```yaml > - name: Set up Docker Compose -> uses: docker/setup-compose-action@v1 +> uses: docker/setup-compose-action@{{% param "setup_compose_action_version" %}} > with: > version: latest > ``` -> +> > If you're building using the Docker Engine with the containerd image store > enabled, you can use the `docker/setup-docker-action` action: -> +> > ```yaml > - > name: Set up Docker -> uses: docker/setup-docker-action@v4 +> uses: docker/setup-docker-action@{{% param "setup_docker_action_version" %}} > with: > version: latest > daemon-config: | @@ -182,7 +182,7 @@ jobs: ### Cache mounts BuildKit doesn't preserve cache mounts in the GitHub Actions cache by default. -If you wish to put your cache mounts into GitHub Actions cache and reuse it +To put your cache mounts into GitHub Actions cache and reuse it between builds, you can use a workaround provided by [`reproducible-containers/buildkit-cache-dance`](https://github.com/reproducible-containers/buildkit-cache-dance). @@ -197,13 +197,16 @@ Example Dockerfile in `build/package/Dockerfile` FROM golang:1.21.1-alpine as base-build WORKDIR /build -RUN go env -w GOMODCACHE=/root/.cache/go-build -COPY go.mod go.sum ./ -RUN --mount=type=cache,target=/root/.cache/go-build go mod download +RUN --mount=type=cache,target=/go/pkg/mod \ + --mount=type=bind,source=go.mod,target=go.mod \ + --mount=type=bind,source=go.sum,target=go.sum \ + go mod download -COPY ./src ./ -RUN --mount=type=cache,target=/root/.cache/go-build go build -o /bin/app /build/src +RUN --mount=type=cache,target=/go/pkg/mod \ + --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=bind,target=. \ + go build -o /bin/app ./src ... 
``` @@ -220,20 +223,20 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - + - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@{{% param "setup_qemu_action_version" %}} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Docker meta id: meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@{{% param "metadata_action_version" %}} with: images: user/app tags: | @@ -243,7 +246,7 @@ jobs: type=semver,pattern={{major}}.{{minor}} - name: Go Build Cache for Docker - uses: actions/cache@v4 + uses: actions/cache@{{% param "cache_action_version" %}} with: path: go-build-cache key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }} @@ -254,7 +257,7 @@ jobs: cache-source: go-build-cache - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: cache-from: type=gha cache-to: type=gha,mode=max @@ -291,16 +294,16 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Cache Docker layers - uses: actions/cache@v4 + uses: actions/cache@{{% param "cache_action_version" %}} with: path: ${{ runner.temp }}/.buildx-cache key: ${{ runner.os }}-buildx-${{ github.sha }} @@ -308,7 +311,7 @@ jobs: ${{ runner.os }}-buildx- - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: push: true tags: user/app:latest diff --git a/content/manuals/build/ci/github-actions/checks.md b/content/manuals/build/ci/github-actions/checks.md index c9dbccff14c..c9c15d468bd 100644 --- a/content/manuals/build/ci/github-actions/checks.md +++ b/content/manuals/build/ci/github-actions/checks.md @@ -25,21 +25,21 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Validate build configuration - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: call: check - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: push: true tags: user/app:latest @@ -79,22 +79,48 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Validate build 
configuration - uses: docker/bake-action@v6 + uses: docker/bake-action@{{% param "bake_action_version" %}} with: targets: validate-build - name: Build - uses: docker/bake-action@v6 + uses: docker/bake-action@{{% param "bake_action_version" %}} with: targets: build push: true ``` + +### Using the `call` input directly + +You can also set the build method with the `call` input which is equivalent to using the `--call` flag with `docker buildx bake` + +For example, to run a check without defining `call` in your Bake file: + +```yaml +name: ci + +on: + push: + +jobs: + docker: + runs-on: ubuntu-latest + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} + + - name: Validate build configuration + uses: docker/bake-action@{{% param "bake_action_version" %}} + with: + targets: build + call: check +``` diff --git a/content/manuals/build/ci/github-actions/configure-builder.md b/content/manuals/build/ci/github-actions/configure-builder.md index 4d6bd1c981a..9ba29245364 100644 --- a/content/manuals/build/ci/github-actions/configure-builder.md +++ b/content/manuals/build/ci/github-actions/configure-builder.md @@ -19,7 +19,7 @@ to pin to Buildx v0.10.0: ```yaml - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: version: v0.10.0 ``` @@ -29,7 +29,7 @@ To pin to a specific version of BuildKit, use the `image` option in the ```yaml - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: driver-opts: image=moby/buildkit:v0.11.0 ``` @@ -51,12 +51,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: buildkitd-flags: --debug - name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} ``` Logs will be available at the end of a job: @@ -85,7 +85,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: buildkitd-config-inline: | [registry."docker.io"] @@ -120,7 +120,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: config: .github/buildkitd.toml ``` @@ -140,11 +140,11 @@ fields: | Name | Type | Description | | ----------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | String | [Name of the node](/reference/cli/docker/buildx/create.md#node). If empty, it's the name of the builder it belongs to, with an index number suffix. This is useful to set it if you want to modify/remove a node in an underlying step of you workflow. 
| -| `endpoint` | String | [Docker context or endpoint](/reference/cli/docker/buildx/create.md#description) of the node to add to the builder | -| `driver-opts` | List | List of additional [driver-specific options](/reference/cli/docker/buildx/create.md#driver-opt) | -| `buildkitd-flags` | String | [Flags for buildkitd](/reference/cli/docker/buildx/create.md#buildkitd-flags) daemon | -| `platforms` | String | Fixed [platforms](/reference/cli/docker/buildx/create.md#platform) for the node. If not empty, values take priority over the detected ones. | +| `name` | String | [Name of the node](/reference/cli/docker/buildx/create/#node). If empty, it's the name of the builder it belongs to, with an index number suffix. This is useful to set it if you want to modify/remove a node in an underlying step of you workflow. | +| `endpoint` | String | [Docker context or endpoint](/reference/cli/docker/buildx/create/#description) of the node to add to the builder | +| `driver-opts` | List | List of additional [driver-specific options](/reference/cli/docker/buildx/create/#driver-opt) | +| `buildkitd-flags` | String | [Flags for buildkitd](/reference/cli/docker/buildx/create/#buildkitd-flags) daemon | +| `platforms` | String | Fixed [platforms](/reference/cli/docker/buildx/create/#platform) for the node. If not empty, values take priority over the detected ones. | Here is an example using remote nodes with the [`remote` driver](/manuals/build/builders/drivers/remote.md) and [TLS authentication](#tls-authentication): @@ -160,7 +160,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: driver: remote endpoint: tcp://oneprovider:1234 @@ -209,7 +209,7 @@ jobs: private-key-name: aws_graviton2 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: endpoint: ssh://me@graviton2 ``` @@ -238,7 +238,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: driver: remote endpoint: tcp://graviton2:1234 @@ -266,10 +266,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@{{% param "checkout_action_version" %}} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: driver: kubernetes @@ -303,21 +303,21 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up builder1 - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} id: builder1 - name: Set up builder2 - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} id: builder2 - name: Build against builder1 - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: builder: ${{ steps.builder1.outputs.name }} target: mytarget1 - name: Build against builder2 - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: builder: ${{ steps.builder2.outputs.name }} target: mytarget2 diff --git a/content/manuals/build/ci/github-actions/copy-image-registries.md b/content/manuals/build/ci/github-actions/copy-image-registries.md index 
4897a070f8f..f0ccb9b9eb3 100644 --- a/content/manuals/build/ci/github-actions/copy-image-registries.md +++ b/content/manuals/build/ci/github-actions/copy-image-registries.md @@ -6,7 +6,7 @@ keywords: ci, github actions, gha, buildkit, buildx, registry --- [Multi-platform images](../../building/multi-platform.md) built using Buildx can -be copied from one registry to another using the [`buildx imagetools create` command](/reference/cli/docker/buildx/imagetools/create.md): +be copied from one registry to another using the [`buildx imagetools create` command](/reference/cli/docker/buildx/imagetools/create/): ```yaml name: ci @@ -19,26 +19,26 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@{{% param "setup_qemu_action_version" %}} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: platforms: linux/amd64,linux/arm64 push: true diff --git a/content/manuals/build/ci/github-actions/export-docker.md b/content/manuals/build/ci/github-actions/export-docker.md index 61489e886dc..366db42df7f 100644 --- a/content/manuals/build/ci/github-actions/export-docker.md +++ b/content/manuals/build/ci/github-actions/export-docker.md @@ -19,10 +19,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: load: true tags: myimage:latest diff --git a/content/manuals/build/ci/github-actions/github-builder/_index.md b/content/manuals/build/ci/github-actions/github-builder/_index.md new file mode 100644 index 00000000000..4322d796880 --- /dev/null +++ b/content/manuals/build/ci/github-actions/github-builder/_index.md @@ -0,0 +1,107 @@ +--- +title: Docker GitHub Builder +linkTitle: GitHub Builder +description: Use Docker-maintained reusable GitHub Actions workflows to build images and artifacts with BuildKit. +keywords: ci, github actions, gha, buildkit, buildx, bake, reusable workflows +params: + sidebar: + badge: + color: green + text: New +--- + +Docker GitHub Builder is a set of [reusable workflows](https://docs.github.com/en/actions/how-tos/reuse-automations/reuse-workflows) +in the [`docker/github-builder` repository](https://github.com/docker/github-builder) +for building container images and local artifacts with [BuildKit](../../../buildkit/_index.md). +This section explains what the workflows solve, how they differ from wiring +together individual GitHub Actions in each repository, and when to use +[`build.yml`](build.md) or [`bake.yml`](bake.md). 
+ +If you compose a build job from `docker/login-action`, `docker/setup-buildx-action`, +`docker/metadata-action`, and either `docker/build-push-action` or +`docker/bake-action`, your repository owns every detail of how the build runs. +That approach works, but it also means every repository has to maintain its own +runner selection, [cache setup](../cache.md), [Provenance settings](../attestations.md), +signing behavior, and [multi-platform manifest handling](../multi-platform.md). +Docker GitHub Builder moves that implementation into Docker-maintained reusable +workflows, so your workflow only decides when to build and which inputs to pass. + +The difference is easiest to see in the job definition. A conventional workflow +spells out each action step: + +```yaml +jobs: + docker: + runs-on: ubuntu-latest + steps: + - name: Login to Docker Hub + uses: docker/login-action@{{% param "login_action_version" %}} + with: + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Set up QEMU + uses: docker/setup-qemu-action@{{% param "setup_qemu_action_version" %}} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} + + - name: Docker meta + uses: docker/metadata-action@{{% param "metadata_action_version" %}} + id: meta + with: + images: name/app + + - name: Build and push + uses: docker/build-push-action@{{% param "build_push_action_version" %}} + with: + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha +``` + +With Docker GitHub Builder, the same build is a reusable workflow call: + +```yaml +jobs: + build: + uses: docker/github-builder/.github/workflows/build.yml@{{% param "github_builder_version" %}} + permissions: + contents: read # to fetch the repository content + id-token: write # for signing attestation(s) with GitHub OIDC Token + with: + output: image + push: ${{ github.event_name != 'pull_request' }} + meta-images: name/app + secrets: + registry-auths: | + - registry: docker.io + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} +``` + +This model gives you a build pipeline that is maintained in the Docker +organization, uses a pinned [BuildKit](../../../buildkit/_index.md) environment, +distributes [multi-platform builds](../../../building/multi-platform.md) across +runners when that helps, and emits signed [SLSA provenance](../../../metadata/attestations/slsa-provenance.md) +that records both the source commit and the builder identity. + +That tradeoff is intentional. You keep control of when the build runs and which +inputs it uses, but the build implementation itself lives in the +Docker-maintained workflow rather than in per-repository job steps. + +Use [`build.yml`](build.md) when your repository builds from a Dockerfile and +the familiar `build-push-action` inputs map cleanly to your workflow. Use +[`bake.yml`](bake.md) when your repository already describes builds in a +[Bake definition](../../../bake/_index.md), or when you want Bake targets, +overrides, and variables to stay as the source of truth. + +Both workflows support image output, local output, cache export to the +[GitHub Actions cache backend](../../../cache/backends/gha.md), +[SBOM generation](../../../metadata/attestations/sbom.md), and signing. The +Bake workflow adds Bake definition validation and builds one target per workflow +call. 
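+
+Because the reusable workflows expose caller-facing outputs, a downstream job can
+act on the build result without knowing how it was produced. The following is a
+minimal sketch, assuming the `digest` output described on the
+[architecture page](architecture.md); the `report` job is illustrative and reads
+the value through the standard `needs` context:
+
+```yaml
+jobs:
+  build:
+    uses: docker/github-builder/.github/workflows/build.yml@{{% param "github_builder_version" %}}
+    permissions:
+      contents: read
+      id-token: write
+    with:
+      output: image
+      push: true
+      meta-images: name/app
+    secrets:
+      registry-auths: |
+        - registry: docker.io
+          username: ${{ vars.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+  report:
+    runs-on: ubuntu-latest
+    needs: build
+    steps:
+      # `digest` is one of the caller-facing outputs described on the architecture page.
+      - name: Print image digest
+        run: echo "Built digest: ${{ needs.build.outputs.digest }}"
+```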
+ +{{% sectionlinks %}} diff --git a/content/manuals/build/ci/github-actions/github-builder/architecture.md b/content/manuals/build/ci/github-actions/github-builder/architecture.md new file mode 100644 index 00000000000..0c0db641a90 --- /dev/null +++ b/content/manuals/build/ci/github-actions/github-builder/architecture.md @@ -0,0 +1,190 @@ +--- +title: Docker GitHub Builder architecture +linkTitle: Architecture +description: Learn about the architecture of Docker GitHub Builder, a set of reusable workflows for building images and artifacts with BuildKit in GitHub Actions. +keywords: ci, github actions, gha, buildkit, buildx, bake, reusable workflows +weight: 10 +--- + +Docker GitHub Builder separates repository orchestration from build +implementation. A consuming repository decides when a build runs, which +permissions and secrets are granted, and which inputs are passed. The reusable +workflow in [`docker/github-builder` repository](https://github.com/docker/github-builder) +owns the build implementation itself. That split keeps repository workflows +short while centralizing BuildKit, caching, provenance, SBOM generation, +signing, and multi-platform assembly in one Docker-maintained path. + +![GitHub Builder overview](./images/architecture-overview.png) + +## Core architecture + +A caller workflow invokes either [`build.yml`](build.md) or [`bake.yml`](bake.md). +[`build.yml`](build.md) is the entrypoint for Dockerfile-oriented builds. +[`bake.yml`](bake.md) is the entrypoint for Bake-oriented builds, where the +Bake definition remains the source of truth for targets and overrides. In both +cases the caller still owns repository policy, including triggers, branch +conditions, permissions, secrets, target selection, metadata inputs, and the +choice between image output and local output. + +Inside the reusable workflow, the first phase prepares the build. It validates +the incoming inputs, resolves the appropriate runner, and expands a +multi-platform request into one job per platform. The execution model is +easiest to picture as a matrix where `linux/amd64` runs on `ubuntu-24.04` and +`linux/arm64` runs on `ubuntu-24.04-arm`. Each platform job builds independently, +then the workflow finalizes the result into one caller-facing output contract. + +```yaml +requested platforms: + linux/amd64,linux/arm64 + +conceptual platform jobs: + linux/amd64 -> ubuntu-24.04 + linux/arm64 -> ubuntu-24.04-arm +``` + +## Execution path + +![GitHub Builder execution flow](./images/execution-flow.png) + +The execution path stays short on purpose. The consuming repository calls the +reusable workflow. The reusable workflow prepares the build, runs the +per-platform jobs, and finalizes the result. For image output, finalization +produces a registry image and multi-platform manifest. For local output, +finalization merges the per-platform files and can upload the merged result as +a GitHub artifact. The caller does not need to reconstruct how Buildx, +BuildKit, caching, or manifest assembly were wired together. + +## The two reusable entrypoints + +[`build.yml`](build.md) is the better fit when the build is already expressed as +a Dockerfile-oriented workflow. It lines up naturally with concepts such as +`context`, `file`, `target`, `build-args`, `labels`, `annotations`, and +`platforms`. This is the entrypoint that feels closest to +`docker/build-push-action`, except the workflow implementation is centralized. 
+ +[`bake.yml`](bake.md) is the better fit when the repository already uses Bake +as the build definition. It preserves the Bake model, including target +resolution, `files`, `set`, and `vars`, while still routing execution through +the same Docker-maintained build path. One important architectural detail is +that the Bake workflow is centered on one target per workflow call, which keeps +provenance, digest handling, and final manifest assembly scoped to one build +unit at a time. + +## Output model + +The reusable workflows expose a stable set of caller-facing outputs so +downstream jobs can consume results without understanding the internal job +graph. In practice, the main values are `digest`, `meta-json`, `artifact-name`, +`output-type`, and `signed`. That contract matters because it keeps promotion, +publishing, or follow-on automation decoupled from the mechanics of runner +selection and per-platform assembly. + +## Examples + +### Dockerfile-oriented image build + +The following example shows the shape of a multi-platform image build driven +by [`build.yml`](build.md). + +```yaml +name: ci + +on: + push: + branches: + - "main" + tags: + - "v*" + pull_request: + +permissions: + contents: read + +jobs: + build: + uses: docker/github-builder/.github/workflows/build.yml@{{% param "github_builder_version" %}} + permissions: + contents: read + id-token: write + with: + output: image + push: ${{ github.event_name != 'pull_request' }} + platforms: linux/amd64,linux/arm64 + meta-images: name/app + meta-tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + secrets: + registry-auths: | + - registry: docker.io + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} +``` + +This call is small because the reusable workflow absorbs the heavy lifting. The +repository decides when the build should run and which inputs it wants, while +the shared implementation handles Buildx setup, BuildKit configuration, +platform fan-out, metadata generation, provenance, SBOM generation, signing, +and final manifest creation. + +### Bake-oriented local output + +The following example shows the shape of a Bake call that exports local output +and uploads the merged artifact. + +```yaml +name: ci + +on: + pull_request: + +permissions: + contents: read + +jobs: + bake: + uses: docker/github-builder/.github/workflows/bake.yml@{{% param "github_builder_version" %}} + permissions: + contents: read + id-token: write + with: + output: local + target: binaries + artifact-upload: true + artifact-name: bake-output +``` + +This form is useful when the repository already keeps its build definition in +Bake and wants to preserve that source of truth. The workflow injects the local +output behavior into the Bake run, executes the target per platform when +needed, and merges the result into one caller-facing artifact. + +## Why this architecture works + +### Performance + +The performance story comes from native platform fan-out, shared BuildKit +configuration, and centralized cache handling. Multi-platform work can be +spread across matching GitHub-hosted runners instead of forcing every +architecture through one build machine. That reduces emulation pressure, +shortens the critical path for cross-platform builds, and gives every +consuming repository the same optimized build baseline. + +### Security + +The security model comes from putting the build implementation in +Docker-maintained reusable workflows instead of ad hoc job steps in each +consumer repository. 
The caller still controls permissions and secrets, but +the build logic itself is centrally reviewed and versioned. The project also +treats provenance, SBOM generation, and signing as first-class concerns, +which strengthens the trust boundary between repository orchestration and +artifact production. + +### Isolation and reliability + +The reliability story comes from separation of concerns. The consuming +repository orchestrates the build. The reusable workflow executes the build. +That reduces CI drift, removes repeated glue code from repositories, and makes +the outcome easier to reason about because the caller sees a stable contract +instead of a large custom job definition. diff --git a/content/manuals/build/ci/github-actions/github-builder/bake.md b/content/manuals/build/ci/github-actions/github-builder/bake.md new file mode 100644 index 00000000000..6934baec8c9 --- /dev/null +++ b/content/manuals/build/ci/github-actions/github-builder/bake.md @@ -0,0 +1,146 @@ +--- +title: Bake with Docker GitHub Builder +linkTitle: Bake workflow +description: Use the Docker GitHub Builder bake.yml reusable workflow to build images and local artifacts from a Bake definition. +keywords: ci, github actions, gha, buildkit, buildx, bake, reusable workflow +weight: 30 +--- + +The [`bake.yml` reusable workflow](https://github.com/docker/github-builder?tab=readme-ov-file#bake-reusable-workflow) +builds from a [Bake definition](../../../bake/_index.md) instead of a Dockerfile +input set. This page shows how to call the workflow for a target, how to pass +Bake overrides and variables, and how to export local output when a Bake file +is already the source of truth for your build. + +## Build and push a Bake target + +The following workflow builds the `image` target from `docker-bake.hcl` and +publishes the result with tags generated from [metadata inputs](../manage-tags-labels.md): + +```yaml +name: ci + +on: + push: + branches: + - "main" + tags: + - "v*" + pull_request: + +permissions: + contents: read + +jobs: + bake: + uses: docker/github-builder/.github/workflows/bake.yml@{{% param "github_builder_version" %}} + permissions: + contents: read # to fetch the repository content + id-token: write # for signing attestation(s) with GitHub OIDC Token + with: + output: image + push: ${{ github.event_name != 'pull_request' }} + target: image + meta-images: name/app + meta-tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + secrets: + registry-auths: | + - registry: docker.io + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} +``` + +Bake workflows build one target per workflow call. Groups and multi-target +builds aren't supported because [SLSA provenance](../attestations.md), digest +handling, and manifest creation are scoped to a single target. + +The workflow validates the definition before the build starts and resolves +the target from the files you pass in `files`. 
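+
+If your Bake definition doesn't live at the repository root, or is split across
+several files, pass the paths explicitly. The following is a minimal sketch; it
+assumes `files` accepts a newline-separated list of paths, mirroring the `files`
+input of `docker/bake-action`, and the file paths shown are placeholders:
+
+```yaml
+jobs:
+  bake:
+    uses: docker/github-builder/.github/workflows/bake.yml@{{% param "github_builder_version" %}}
+    permissions:
+      contents: read # to fetch the repository content
+      id-token: write # for signing attestation(s) with GitHub OIDC Token
+    with:
+      output: image
+      push: true
+      target: image
+      # Assumption: newline-separated list of Bake definition files.
+      files: |
+        ./docker-bake.hcl
+        ./docker-bake.override.hcl
+      meta-images: name/app
+    secrets:
+      registry-auths: |
+        - registry: docker.io
+          username: ${{ vars.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+```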
+ +## Override target values and variables + +Because the workflow delegates the build to Bake, you can keep using `set` and +`vars` for target-specific overrides: + +```yaml +name: ci + +on: + push: + branches: + - "main" + +permissions: + contents: read + +jobs: + bake: + uses: docker/github-builder/.github/workflows/bake.yml@{{% param "github_builder_version" %}} + permissions: + contents: read # to fetch the repository content + id-token: write # for signing attestation(s) with GitHub OIDC Token + with: + output: image + push: true + target: image + vars: | + IMAGE_TAG=${{ github.sha }} + set: | + *.args.BUILD_RUN_ID=${{ github.run_id }} + *.platform=linux/amd64,linux/arm64 + cache: true + cache-scope: image + meta-images: name/app + meta-tags: | + type=sha + set-meta-annotations: true + secrets: + registry-auths: | + - registry: docker.io + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} +``` + +This form fits repositories that already use Bake groups, target inheritance, +and variable expansion. The reusable workflow takes care of Buildx setup, +[GitHub Actions cache export](../../../cache/backends/gha.md), +[Provenance defaults](../../../metadata/attestations/slsa-provenance.md), +signing behavior, and the final multi-platform manifest. Metadata labels and +annotations can be merged into the Bake definition without adding a separate +metadata step to your workflow. + +## Export local output from Bake + +If the target should export files instead of publishing an image, switch the +workflow output to `local` and upload the artifact: + +```yaml +name: ci + +on: + pull_request: + +permissions: + contents: read + +jobs: + bake: + uses: docker/github-builder/.github/workflows/bake.yml@{{% param "github_builder_version" %}} + permissions: + contents: read # to fetch the repository content + id-token: write # for signing attestation(s) with GitHub OIDC Token + with: + output: local + target: binaries + artifact-upload: true + artifact-name: bake-output +``` + +With `output: local`, the workflow injects the matching local output override +into the Bake run and merges the uploaded artifacts after the per-platform +builds finish. If you need a manual Bake pattern that stays in a normal job, +see [Multi-platform image](../multi-platform.md). If your build does not need a +Bake definition, use [build.yml](build.md) instead. diff --git a/content/manuals/build/ci/github-actions/github-builder/build.md b/content/manuals/build/ci/github-actions/github-builder/build.md new file mode 100644 index 00000000000..202739fb58c --- /dev/null +++ b/content/manuals/build/ci/github-actions/github-builder/build.md @@ -0,0 +1,150 @@ +--- +title: Build with Docker GitHub Builder +linkTitle: Build workflow +description: Use the Docker GitHub Builder build.yml reusable workflow to build images and local artifacts from a Dockerfile. +keywords: ci, github actions, gha, buildkit, buildx, reusable workflow, dockerfile +weight: 20 +--- + +The [`build.yml` reusable workflow](https://github.com/docker/github-builder?tab=readme-ov-file#build-reusable-workflow) +builds from a Dockerfile and packages the same core tasks that many repositories +wire together by hand. This page shows how to call the workflow, publish +[multi-platform images](../../../building/multi-platform.md), and export local +build artifacts without rebuilding the job structure in every repository. 
+ +## Build and push an image + +The following workflow builds from the repository Dockerfile, pushes on branch +and tag events, and uses metadata inputs to generate tags: + +```yaml +name: ci + +on: + push: + branches: + - "main" + tags: + - "v*" + pull_request: + +permissions: + contents: read + +jobs: + build: + uses: docker/github-builder/.github/workflows/build.yml@{{% param "github_builder_version" %}} + permissions: + contents: read # to fetch the repository content + id-token: write # for signing attestation(s) with GitHub OIDC Token + with: + output: image + push: ${{ github.event_name != 'pull_request' }} + platforms: linux/amd64,linux/arm64 + meta-images: name/app + meta-tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + secrets: + registry-auths: | + - registry: docker.io + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} +``` + +When you set `output: image`, `meta-images` is required because the workflow +creates image names and [manifest tags](../manage-tags-labels.md) from that +input. `runner: auto` and `distribute: true` are the defaults, so a +multi-platform build can fan out across native GitHub-hosted runners instead +of forcing the whole build onto one machine. `sign: auto` is also the default, +which means the workflow signs [attestation manifests](../attestations.md) +when the image is pushed. + +## Export local output as an artifact + +The same workflow can export files instead of publishing an image. This is +useful when you want compiled assets, an unpacked root filesystem, or another +local exporter result as part of CI: + +```yaml +name: ci + +on: + pull_request: + +permissions: + contents: read + +jobs: + build: + uses: docker/github-builder/.github/workflows/build.yml@{{% param "github_builder_version" %}} + permissions: + contents: read # to fetch the repository content + id-token: write # for signing attestation(s) with GitHub OIDC Token + with: + output: local + artifact-upload: true + artifact-name: build-output + platforms: linux/amd64,linux/arm64 +``` + +With `output: local`, the workflow exports files to the runner filesystem and +merges per-platform artifacts in the finalize phase. When +`artifact-upload: true` is set, the merged result is uploaded as a GitHub +artifact, and `sign: auto` signs the uploaded artifacts. `push` is ignored for +local output, so there is no registry requirement in this form. + +## Add cache, Dockerfile inputs, and metadata labels + +You can tune the Dockerfile build in the same job call. This example sets a +custom Dockerfile path, a target stage, GitHub Actions cache, and metadata +labels: + +```yaml +name: ci + +on: + push: + branches: + - "main" + +permissions: + contents: read + +jobs: + build: + uses: docker/github-builder/.github/workflows/build.yml@{{% param "github_builder_version" %}} + permissions: + contents: read # to fetch the repository content + id-token: write # for signing attestation(s) with GitHub OIDC Token + with: + output: image + push: true + context: . + file: ./docker/Dockerfile + target: runtime + build-args: | + NODE_ENV=production + VERSION=${{ github.sha }} + cache: true + cache-scope: myapp + meta-images: name/app + meta-tags: | + type=sha + set-meta-labels: true + secrets: + registry-auths: | + - registry: docker.io + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} +``` + +This is a Dockerfile build, so the inputs map closely to +`docker/build-push-action`. 
The difference is that the reusable workflow owns +Buildx setup, [BuildKit](../../../buildkit/_index.md) configuration, +[SLSA provenance](../../../metadata/attestations/slsa-provenance.md) mode, +[GitHub Actions cache backend](../../../cache/backends/gha.md) wiring, signing, +and manifest creation. If you need more background on metadata or platform +distribution, see [Manage tags and labels](../manage-tags-labels.md) and +[Multi-platform image](../multi-platform.md). diff --git a/content/manuals/build/ci/github-actions/github-builder/images/architecture-overview.png b/content/manuals/build/ci/github-actions/github-builder/images/architecture-overview.png new file mode 100644 index 00000000000..801ff48faf5 Binary files /dev/null and b/content/manuals/build/ci/github-actions/github-builder/images/architecture-overview.png differ diff --git a/content/manuals/build/ci/github-actions/github-builder/images/execution-flow.png b/content/manuals/build/ci/github-actions/github-builder/images/execution-flow.png new file mode 100644 index 00000000000..7d4765f86a1 Binary files /dev/null and b/content/manuals/build/ci/github-actions/github-builder/images/execution-flow.png differ diff --git a/content/manuals/build/ci/github-actions/local-registry.md b/content/manuals/build/ci/github-actions/local-registry.md index 807b7e7c320..638cfd5c1ff 100644 --- a/content/manuals/build/ci/github-actions/local-registry.md +++ b/content/manuals/build/ci/github-actions/local-registry.md @@ -19,20 +19,20 @@ jobs: runs-on: ubuntu-latest services: registry: - image: registry:2 + image: registry:3 ports: - 5000:5000 steps: - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@{{% param "setup_qemu_action_version" %}} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: driver-opts: network=host - name: Build and push to local registry - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: push: true tags: localhost:5000/name/app:latest diff --git a/content/manuals/build/ci/github-actions/manage-tags-labels.md b/content/manuals/build/ci/github-actions/manage-tags-labels.md index 00d2082585b..5e3f0e19158 100644 --- a/content/manuals/build/ci/github-actions/manage-tags-labels.md +++ b/content/manuals/build/ci/github-actions/manage-tags-labels.md @@ -29,7 +29,7 @@ jobs: steps: - name: Docker meta id: meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@{{% param "metadata_action_version" %}} with: # list of Docker images to use as base name for tags images: | @@ -47,27 +47,27 @@ jobs: - name: Login to Docker Hub if: github.event_name != 'pull_request' - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to GHCR if: github.event_name != 'pull_request' - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@{{% param "setup_qemu_action_version" %}} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build and push - uses: 
docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: push: ${{ github.event_name != 'pull_request' }} tags: ${{ steps.meta.outputs.tags }} diff --git a/content/manuals/build/ci/github-actions/multi-platform.md b/content/manuals/build/ci/github-actions/multi-platform.md index 787abccda09..cc64d180349 100644 --- a/content/manuals/build/ci/github-actions/multi-platform.md +++ b/content/manuals/build/ci/github-actions/multi-platform.md @@ -26,19 +26,19 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@{{% param "setup_qemu_action_version" %}} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: platforms: linux/amd64,linux/arm64 push: true @@ -47,10 +47,7 @@ jobs: ## Build and load multi-platform images -The default Docker setup for GitHub Actions runners does not support loading -multi-platform images to the local image store of the runner after building -them. To load a multi-platform image, you need to enable the containerd image -store option for the Docker Engine. +The default Docker setup for GitHub Actions runners supports building and pushing multi-platform images to registries. However, it does not support loading multi-platform images to the local image store of the runner after building them. To load a multi-platform image locally, you need to enable the containerd image store option for the Docker Engine. There is no way to configure the default Docker setup in the GitHub Actions runners directly, but you can use `docker/setup-docker-action` to customize the @@ -71,7 +68,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker - uses: docker/setup-docker-action@v4 + uses: docker/setup-docker-action@{{% param "setup_docker_action_version" %}} with: daemon-config: | { @@ -82,16 +79,16 @@ jobs: } - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@{{% param "setup_qemu_action_version" %}} - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: platforms: linux/amd64,linux/arm64 load: true @@ -100,17 +97,16 @@ jobs: ## Distribute build across multiple runners -In the previous example, each platform is built on the same runner which can -take a long time depending on the number of platforms and your Dockerfile. +Building multiple platforms on the same runner can significantly extend build +times, particularly when dealing with complex Dockerfiles or a high number of +target platforms. If you want to split platform builds across multiple runners +without maintaining a custom matrix and merge job, use the +[Docker GitHub Builder](github-builder/_index.md). The reusable workflows +compute the per-platform matrix, run each platform on its own runner, and +create the final manifest for you. 
-To solve this issue you can use a matrix strategy to distribute the build for -each platform across multiple runners and create manifest list using the -[`buildx imagetools create` command](/reference/cli/docker/buildx/imagetools/create.md). - -The following workflow will build the image for each platform on a dedicated -runner using a matrix strategy and push by digest. Then, the `merge` job will -create manifest lists and push them to Docker Hub. The [`metadata` action](https://github.com/docker/metadata-action) -is used to set tags and labels. +The following workflow uses the [`build.yml` reusable workflow](github-builder/build.md) +to distribute a multi-platform Dockerfile build: ```yaml name: ci @@ -118,117 +114,43 @@ name: ci on: push: -env: - REGISTRY_IMAGE: user/app +permissions: + contents: read jobs: build: - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - platform: - - linux/amd64 - - linux/arm64 - steps: - - name: Prepare - run: | - platform=${{ matrix.platform }} - echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.REGISTRY_IMAGE }} - - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ vars.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Build and push by digest - id: build - uses: docker/build-push-action@v6 - with: - platforms: ${{ matrix.platform }} - labels: ${{ steps.meta.outputs.labels }} - tags: ${{ env.REGISTRY_IMAGE }} - outputs: type=image,push-by-digest=true,name-canonical=true,push=true - - - name: Export digest - run: | - mkdir -p ${{ runner.temp }}/digests - digest="${{ steps.build.outputs.digest }}" - touch "${{ runner.temp }}/digests/${digest#sha256:}" - - - name: Upload digest - uses: actions/upload-artifact@v4 - with: - name: digests-${{ env.PLATFORM_PAIR }} - path: ${{ runner.temp }}/digests/* - if-no-files-found: error - retention-days: 1 - - merge: - runs-on: ubuntu-latest - needs: - - build - steps: - - name: Download digests - uses: actions/download-artifact@v4 - with: - path: ${{ runner.temp }}/digests - pattern: digests-* - merge-multiple: true - - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: + uses: docker/github-builder/.github/workflows/build.yml@{{% param "github_builder_version" %}} + permissions: + contents: read + id-token: write + with: + output: image + push: true + platforms: linux/amd64,linux/arm64 + meta-images: user/app + meta-tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + secrets: + registry-auths: | + - registry: docker.io username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.REGISTRY_IMAGE }} - tags: | - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern={{major}}.{{minor}} - - - name: Create manifest list and push - working-directory: ${{ runner.temp }}/digests - run: | - docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ - $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *) - - - name: Inspect image - run: | - docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }} ``` -### With Bake - -It's also possible to build on multiple runners using Bake, with the -[bake action](https://github.com/docker/bake-action). +With `runner: auto` and `distribute: true`, which are the defaults, the +workflow splits the build into one platform per runner and assembles the final +multi-platform image in its finalize phase. If you need to control the Docker +build inputs directly, see [Build with Docker GitHub Builder build.yml](github-builder/build.md). -You can find a live example [in this GitHub repository](https://github.com/crazy-max/docker-linguist). +### With Bake -The following example achieves the same results as described in -[the previous section](#distribute-build-across-multiple-runners). +You can use the [`bake.yml` reusable workflow](github-builder/bake.md) for the +same pattern when your build is defined in a Bake file. The workflow reads the +target platforms from the Bake definition, distributes the per-platform builds, +and publishes the final manifest without a separate prepare or merge job. ```hcl variable "DEFAULT_TAG" { @@ -271,138 +193,26 @@ name: ci on: push: -env: - REGISTRY_IMAGE: user/app +permissions: + contents: read jobs: - prepare: - runs-on: ubuntu-latest - outputs: - matrix: ${{ steps.platforms.outputs.matrix }} - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Create matrix - id: platforms - run: | - echo "matrix=$(docker buildx bake image-all --print | jq -cr '.target."image-all".platforms')" >>${GITHUB_OUTPUT} - - - name: Show matrix - run: | - echo ${{ steps.platforms.outputs.matrix }} - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.REGISTRY_IMAGE }} - - - name: Rename meta bake definition file - run: | - mv "${{ steps.meta.outputs.bake-file }}" "${{ runner.temp }}/bake-meta.json" - - - name: Upload meta bake definition - uses: actions/upload-artifact@v4 - with: - name: bake-meta - path: ${{ runner.temp }}/bake-meta.json - if-no-files-found: error - retention-days: 1 - - build: - runs-on: ubuntu-latest - needs: - - prepare - strategy: - fail-fast: false - matrix: - platform: ${{ fromJson(needs.prepare.outputs.matrix) }} - steps: - - name: Prepare - run: | - platform=${{ matrix.platform }} - echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV - - - name: Download meta bake definition - uses: actions/download-artifact@v4 - with: - name: bake-meta - path: ${{ runner.temp }} - - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ vars.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Build - id: bake - uses: docker/bake-action@v6 - with: - files: | - ./docker-bake.hcl - cwd://${{ runner.temp }}/bake-meta.json - targets: image - set: | - *.tags=${{ env.REGISTRY_IMAGE }} - *.platform=${{ matrix.platform }} - *.output=type=image,push-by-digest=true,name-canonical=true,push=true - - - name: Export digest - run: | - mkdir -p ${{ runner.temp }}/digests - digest="${{ fromJSON(steps.bake.outputs.metadata).image['containerimage.digest'] }}" - touch "${{ runner.temp }}/digests/${digest#sha256:}" - - - name: Upload digest - uses: actions/upload-artifact@v4 - with: - 
name: digests-${{ env.PLATFORM_PAIR }} - path: ${{ runner.temp }}/digests/* - if-no-files-found: error - retention-days: 1 - - merge: - runs-on: ubuntu-latest - needs: - - build - steps: - - name: Download meta bake definition - uses: actions/download-artifact@v4 - with: - name: bake-meta - path: ${{ runner.temp }} - - - name: Download digests - uses: actions/download-artifact@v4 - with: - path: ${{ runner.temp }}/digests - pattern: digests-* - merge-multiple: true - - - name: Login to DockerHub - uses: docker/login-action@v3 - with: + bake: + uses: docker/github-builder/.github/workflows/bake.yml@{{% param "github_builder_version" %}} + permissions: + contents: read + id-token: write + with: + output: image + push: true + target: image-all + meta-images: user/app + meta-tags: | + type=ref,event=branch + type=sha + secrets: + registry-auths: | + - registry: docker.io username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Create manifest list and push - working-directory: ${{ runner.temp }}/digests - run: | - docker buildx imagetools create $(jq -cr '.target."docker-metadata-action".tags | map(select(startswith("${{ env.REGISTRY_IMAGE }}")) | "-t " + .) | join(" ")' ${{ runner.temp }}/bake-meta.json) \ - $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *) - - - name: Inspect image - run: | - docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:$(jq -r '.target."docker-metadata-action".args.DOCKER_META_VERSION' ${{ runner.temp }}/bake-meta.json) ``` diff --git a/content/manuals/build/ci/github-actions/named-contexts.md b/content/manuals/build/ci/github-actions/named-contexts.md index 0419bae733a..fa0da022ea9 100644 --- a/content/manuals/build/ci/github-actions/named-contexts.md +++ b/content/manuals/build/ci/github-actions/named-contexts.md @@ -5,7 +5,7 @@ description: Use additional contexts in multi-stage builds with GitHub Actions keywords: ci, github actions, gha, buildkit, buildx, context --- -You can define [additional build contexts](/reference/cli/docker/buildx/build.md#build-context), +You can define [additional build contexts](/reference/cli/docker/buildx/build/#build-context), and access them in your Dockerfile with `FROM name` or `--from=name`. When Dockerfile defines a stage with the same name it's overwritten. 
@@ -33,10 +33,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: build-contexts: | alpine=docker-image://alpine:{{% param "example_alpine_version" %}} @@ -68,19 +68,19 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: driver: docker - name: Build base image - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: context: "{{defaultContext}}:base" load: true tags: my-base-image:latest - name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: build-contexts: | alpine=docker-image://my-base-image:latest @@ -112,28 +112,28 @@ jobs: runs-on: ubuntu-latest services: registry: - image: registry:2 + image: registry:3 ports: - 5000:5000 steps: - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@{{% param "setup_qemu_action_version" %}} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} with: # network=host driver-opt needed to push to local registry driver-opts: network=host - name: Build base image - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: context: "{{defaultContext}}:base" tags: localhost:5000/my-base-image:latest push: true - name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: build-contexts: | alpine=docker-image://localhost:5000/my-base-image:latest diff --git a/content/manuals/build/ci/github-actions/push-multi-registries.md b/content/manuals/build/ci/github-actions/push-multi-registries.md index 617b39d7cff..e80bbc456b4 100644 --- a/content/manuals/build/ci/github-actions/push-multi-registries.md +++ b/content/manuals/build/ci/github-actions/push-multi-registries.md @@ -19,26 +19,26 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@{{% param "setup_qemu_action_version" %}} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: platforms: linux/amd64,linux/arm64 push: true diff --git a/content/manuals/build/ci/github-actions/reproducible-builds.md b/content/manuals/build/ci/github-actions/reproducible-builds.md index 038c64020ac..0649ca53493 100644 --- a/content/manuals/build/ci/github-actions/reproducible-builds.md +++ 
b/content/manuals/build/ci/github-actions/reproducible-builds.md @@ -33,10 +33,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: tags: user/app:latest env: @@ -57,10 +57,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build - uses: docker/bake-action@v6 + uses: docker/bake-action@{{% param "bake_action_version" %}} env: SOURCE_DATE_EPOCH: 0 ``` @@ -86,13 +86,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Get Git commit timestamps run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV - name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: tags: user/app:latest env: @@ -113,13 +113,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Get Git commit timestamps run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV - name: Build - uses: docker/bake-action@v6 + uses: docker/bake-action@{{% param "bake_action_version" %}} env: SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }} ``` diff --git a/content/manuals/build/ci/github-actions/secrets.md b/content/manuals/build/ci/github-actions/secrets.md index e24ea725d56..40c2c0ac330 100644 --- a/content/manuals/build/ci/github-actions/secrets.md +++ b/content/manuals/build/ci/github-actions/secrets.md @@ -14,7 +14,7 @@ Docker Build supports two forms of secrets: - [SSH mounts](#ssh-mounts) add SSH agent sockets or keys into the build container. This page shows how to use secrets with GitHub Actions. -For an introduction to secrets in general, see [Build secrets](../../building/secrets.md). +For an introduction to secrets in general, see [Build secrets](/manuals/build/building/secrets.md). ## Secret mounts @@ -43,13 +43,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@{{% param "setup_qemu_action_version" %}} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: platforms: linux/amd64,linux/arm64 tags: user/app:latest @@ -58,13 +58,125 @@ jobs: ``` > [!NOTE] -> -> You can also expose a secret file to the build with the `secret-files` input: -> -> ```yaml -> secret-files: | -> "MY_SECRET=./secret.txt" -> ``` +> Secrets are mounted as files in the build container. +> By default, they're available at `/run/secrets/`. +> You can also use the `env` option to load a secret into an environment variable, +> or the `target` option to customize the mount path. +> For details on secret mounts, see [Build secrets](/manuals/build/building/secrets.md). + +### Using secret files + +The `secret-files` input lets you mount existing files as secrets in your build. 
+This is useful when you need to use credential files that are generated during your workflow, +or when you need to mount configuration files like `.npmrc` or `.pypirc` that are already in the expected format. + +The key difference between `secrets` and `secret-files`: + +- `secrets`: Pass secret values as strings (from environment variables or GitHub secrets) +- `secret-files`: Mount existing files from the runner's filesystem + +#### Example: Using .npmrc for private npm packages + +If your build needs to install packages from a private npm registry, +you can create an `.npmrc` file and mount it as a secret: + +```yaml +name: ci + +on: + push: + +jobs: + docker: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@{{% param "checkout_action_version" %}} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} + + - name: Create .npmrc file + run: | + echo "//registry.npmjs.org/:_authToken=${{ secrets.NPM_TOKEN }}" > .npmrc + + - name: Build + uses: docker/build-push-action@{{% param "build_push_action_version" %}} + with: + context: . + secret-files: | + npmrc=./.npmrc + tags: user/app:latest +``` + +In your Dockerfile, mount the secret file to the expected location: + +```dockerfile +# syntax=docker/dockerfile:1 +FROM node:20-alpine + +WORKDIR /app + +COPY package*.json ./ + +RUN --mount=type=secret,id=npmrc,target=/root/.npmrc \ + npm ci + +COPY . . + +RUN npm run build +``` + +#### Example: Using dynamically generated credentials + +You can generate credential files from multiple secrets and mount them: + +```yaml +name: ci + +on: + push: + +jobs: + docker: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@{{% param "checkout_action_version" %}} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} + + - name: Create credentials file + run: | + cat <<EOF > aws-credentials + [default] + aws_access_key_id = ${{ secrets.AWS_ACCESS_KEY_ID }} + aws_secret_access_key = ${{ secrets.AWS_SECRET_ACCESS_KEY }} + EOF + + - name: Build + uses: docker/build-push-action@{{% param "build_push_action_version" %}} + with: + context: .
+ secret-files: | + aws=./aws-credentials + tags: user/app:latest +``` + +In your Dockerfile: + +```dockerfile +# syntax=docker/dockerfile:1 +FROM alpine + +RUN apk add --no-cache aws-cli + +RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \ + aws s3 cp s3://my-private-bucket/data.tar.gz /tmp/ +``` + +### Multi-line secrets If you're using [GitHub secrets](https://docs.github.com/en/actions/security-guides/encrypted-secrets) and need to handle multi-line value, you will need to place the key-value pair @@ -178,7 +290,7 @@ jobs: private-key-name: github-ppk - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: ssh: default push: true @@ -206,7 +318,7 @@ jobs: private-key-name: github-ppk - name: Build - uses: docker/bake-action@v6 + uses: docker/bake-action@{{% param "bake_action_version" %}} with: set: | *.ssh=default diff --git a/content/manuals/build/ci/github-actions/share-image-jobs.md b/content/manuals/build/ci/github-actions/share-image-jobs.md index 0fb11c21957..c6615ca7fa5 100644 --- a/content/manuals/build/ci/github-actions/share-image-jobs.md +++ b/content/manuals/build/ci/github-actions/share-image-jobs.md @@ -24,10 +24,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build and export - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: tags: myimage:latest outputs: type=docker,dest=${{ runner.temp }}/myimage.tar diff --git a/content/manuals/build/ci/github-actions/test-before-push.md b/content/manuals/build/ci/github-actions/test-before-push.md index 05c5f824b26..7aee48ac451 100644 --- a/content/manuals/build/ci/github-actions/test-before-push.md +++ b/content/manuals/build/ci/github-actions/test-before-push.md @@ -28,19 +28,19 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@{{% param "setup_qemu_action_version" %}} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build and export to Docker - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: load: true tags: ${{ env.TEST_TAG }} @@ -50,7 +50,7 @@ jobs: docker run --rm ${{ env.TEST_TAG }} - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: platforms: linux/amd64,linux/arm64 push: true diff --git a/content/manuals/build/ci/github-actions/update-dockerhub-desc.md b/content/manuals/build/ci/github-actions/update-dockerhub-desc.md index 6c49dc3ce88..c3f5e9d35a6 100644 --- a/content/manuals/build/ci/github-actions/update-dockerhub-desc.md +++ b/content/manuals/build/ci/github-actions/update-dockerhub-desc.md @@ -20,19 +20,19 @@ jobs: runs-on: ubuntu-latest steps: - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@{{% param "login_action_version" %}} with: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up QEMU - 
uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@{{% param "setup_qemu_action_version" %}} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@{{% param "build_push_action_version" %}} with: push: true tags: user/app:latest diff --git a/content/manuals/build/concepts/context.md b/content/manuals/build/concepts/context.md index 2818bec9b7b..dffebe9f398 100644 --- a/content/manuals/build/concepts/context.md +++ b/content/manuals/build/concepts/context.md @@ -245,6 +245,50 @@ docker build github.com/docker/buildx#d4f088e689b41353d74f1a0bfcd6d7c0b213aed2 docker build github.com/docker/buildx#d4f088e ``` +#### URL queries + +{{< summary-bar feature_name="Build URL Queries" >}} + +URL queries are more structured and recommended over [URL fragments](#url-fragments): + +```console +$ docker buildx build 'https://github.com/user/myrepo.git?branch=container&subdir=docker' +``` + +| Build syntax suffix | Commit used | Build context used | +| -------------------------------------------- | ----------------------------- | ------------------ | +| `myrepo.git` | `refs/heads/` | `/` | +| `myrepo.git?tag=mytag` | `refs/tags/mytag` | `/` | +| `myrepo.git?branch=mybranch` | `refs/heads/mybranch` | `/` | +| `myrepo.git?ref=pull/42/head` | `refs/pull/42/head` | `/` | +| `myrepo.git?subdir=myfolder` | `refs/heads/` | `/myfolder` | +| `myrepo.git?branch=master&subdir=myfolder` | `refs/heads/master` | `/myfolder` | +| `myrepo.git?tag=mytag&subdir=myfolder` | `refs/tags/mytag` | `/myfolder` | +| `myrepo.git?branch=mybranch&subdir=myfolder` | `refs/heads/mybranch` | `/myfolder` | + +A commit hash can be specified as a `checksum` (alias `commit`) query, along with +`tag`, `branch`, or `ref` queries to verify that the reference resolves to the +expected commit: + +```console +$ docker buildx build 'https://github.com/moby/buildkit.git?tag=v0.21.1&checksum=66735c67' +``` + +If it doesn't match, the build fails: + +```console +$ docker buildx build 'https://github.com/user/myrepo.git?tag=v0.1.0&commit=deadbeef' +... +#3 [internal] load git source https://github.com/user/myrepo.git?tag=v0.1.0-rc1&commit=deadbeef +#3 0.484 bb41e835b6c3523c7c45b248cf4b45e7f862bc42 refs/tags/v0.1.0 +#3 ERROR: expected checksum to match deadbeef, got bb41e835b6c3523c7c45b248cf4b45e7f862bc42 +``` + +> [!NOTE] +> +> Short commit hash is supported with `checksum` (alias `commit`) query but for +> `ref`, only the full hash of the commit is supported. + #### Keep `.git` directory By default, BuildKit doesn't keep the `.git` directory when using Git contexts. @@ -275,7 +319,7 @@ either SSH or token-based authentication. Buildx automatically detects and uses SSH credentials if the Git context you specify is an SSH or Git address. By default, this uses `$SSH_AUTH_SOCK`. You can configure the SSH credentials to use with the -[`--ssh` flag](/reference/cli/docker/buildx/build.md#ssh). +[`--ssh` flag](/reference/cli/docker/buildx/build/#ssh). ```console $ docker buildx build --ssh default git@github.com:user/private.git @@ -283,7 +327,7 @@ $ docker buildx build --ssh default git@github.com:user/private.git If you want to use token-based authentication instead, you can pass the token using the -[`--secret` flag](/reference/cli/docker/buildx/build.md#secret). +[`--secret` flag](/reference/cli/docker/buildx/build/#secret). 
```console $ GIT_AUTH_TOKEN= docker buildx build \ @@ -498,6 +542,7 @@ The following code snippet shows an example `.dockerignore` file. */*/temp* temp? ``` + This file causes the following build behavior: @@ -508,6 +553,8 @@ This file causes the following build behavior: | `*/*/temp*` | Exclude files and directories starting with `temp` from any subdirectory that is two levels below the root. For example, `/somedir/subdir/temporary.txt` is excluded. | | `temp?` | Exclude files and directories in the root directory whose names are a one-character extension of `temp`. For example, `/tempa` and `/tempb` are excluded. | + + Matching is done using Go's [`filepath.Match` function](https://golang.org/pkg/path/filepath#Match) rules. A preprocessing step uses Go's @@ -755,5 +802,5 @@ $ docker buildx bake app For more information about working with named contexts, see: -- [`--build-context` CLI reference](/reference/cli/docker/buildx/build.md#build-context) +- [`--build-context` CLI reference](/reference/cli/docker/buildx/build/#build-context) - [Using Bake with additional contexts](/manuals/build/bake/contexts.md) diff --git a/content/manuals/build/concepts/dockerfile.md b/content/manuals/build/concepts/dockerfile.md index 18601a9b15d..1d0893c531e 100644 --- a/content/manuals/build/concepts/dockerfile.md +++ b/content/manuals/build/concepts/dockerfile.md @@ -4,10 +4,12 @@ weight: 20 description: Learn about Dockerfiles and how to use them with Docker Images to build and package your software keywords: build, buildx, buildkit, getting started, dockerfile aliases: -- /build/hellobuild/ -- /build/building/packaging/ + - /build/hellobuild/ + - /build/building/packaging/ --- + + ## Dockerfile It all starts with a Dockerfile. @@ -19,8 +21,8 @@ reference in the [Dockerfile reference](/reference/dockerfile.md). Here are the most common types of instructions: -| Instruction | Description | -| ------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Instruction | Description | +| --------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | [`FROM `](/reference/dockerfile.md#from) | Defines a base for your image. | | [`RUN `](/reference/dockerfile.md#run) | Executes any commands in a new layer on top of the current image and commits the result. `RUN` also has a shell form for running commands. | | [`WORKDIR `](/reference/dockerfile.md#workdir) | Sets the working directory for any `RUN`, `CMD`, `ENTRYPOINT`, `COPY`, and `ADD` instructions that follow it in the Dockerfile. | @@ -41,7 +43,7 @@ Some projects may need distinct Dockerfiles for specific purposes. A common convention is to name these `.Dockerfile`. You can specify the Dockerfile filename using the `--file` flag for the `docker build` command. Refer to the -[`docker build` CLI reference](/reference/cli/docker/buildx/build.md#file) +[`docker build` CLI reference](/reference/cli/docker/buildx/build/#file) to learn about the `--file` flag. > [!NOTE] @@ -146,7 +148,7 @@ use this notation to name your images. 
There are many public images you can leverage in your projects, by importing them into your build steps using the Dockerfile `FROM` instruction. -[Docker Hub](https://hub.docker.com/search?image_filter=official&q=&type=image) +[Docker Hub](https://hub.docker.com/search?badges=official) contains a large set of official images that you can use for this purpose. ### Environment setup @@ -167,7 +169,7 @@ the container. Note the `# install app dependencies` line. This is a comment. Comments in Dockerfiles begin with the `#` symbol. As your Dockerfile evolves, comments can be instrumental to document how your Dockerfile works for any future readers -and editors of the file, including your future self! +and editors of the file, including your future self. > [!NOTE] > @@ -194,7 +196,7 @@ use the command to install the flask web framework. The next instruction uses the [`COPY` instruction](/reference/dockerfile.md#copy) to copy the -`hello.py` file from the local build context into the root directory of our image. +`hello.py` file from the local build context into the root directory of our image. ```dockerfile COPY hello.py / @@ -281,5 +283,5 @@ Docker host. > [!TIP] > -> Want a better editing experience for Dockerfiles in VS Code? -> Check out the [Docker VS Code Extension (Beta)](https://marketplace.visualstudio.com/items?itemName=docker.docker) for linting, code navigation, and vulnerability scanning. +> To improve linting, code navigation, and vulnerability scanning of your Dockerfiles in Visual Studio Code +> see the [Docker DX](https://marketplace.visualstudio.com/items?itemName=docker.docker) extension. diff --git a/content/manuals/build/debug/opentelemetry.md b/content/manuals/build/debug/opentelemetry.md index 575d8fc1573..ad635daf05c 100644 --- a/content/manuals/build/debug/opentelemetry.md +++ b/content/manuals/build/debug/opentelemetry.md @@ -29,7 +29,7 @@ $ docker buildx create --use \ --driver-opt "env.JAEGER_TRACE=localhost:6831" ``` -Boot and [inspect `mybuilder`](/reference/cli/docker/buildx/inspect.md): +Boot and [inspect `mybuilder`](/reference/cli/docker/buildx/inspect/): ```console $ docker buildx inspect --bootstrap diff --git a/content/manuals/build/exporters/_index.md b/content/manuals/build/exporters/_index.md index 2921fb1ff31..57f465191ad 100644 --- a/content/manuals/build/exporters/_index.md +++ b/content/manuals/build/exporters/_index.md @@ -10,7 +10,7 @@ aliases: Exporters save your build results to a specified output type. You specify the exporter to use with the -[`--output` CLI option](/reference/cli/docker/buildx/build.md#output). +[`--output` CLI option](/reference/cli/docker/buildx/build/#output). Buildx supports the following exporters: - `image`: exports the build result to a container image. @@ -222,8 +222,8 @@ The common parameters described here are: When you export a compressed output, you can configure the exact compression algorithm and level to use. While the default values provide a good -out-of-the-box experience, you may wish to tweak the parameters to optimize for -storage vs compute costs. Changing the compression parameters can reduce storage +out-of-the-box experience, you can tweak the parameters to optimize for +storage versus compute costs. Changing the compression parameters can reduce storage space required, and improve image download times, but will increase build times. To select the compression algorithm, you can use the `compression` option. For @@ -252,6 +252,22 @@ the previous compression algorithm. 
> The `gzip` and `estargz` compression methods use the [`compress/gzip` package](https://pkg.go.dev/compress/gzip), > while `zstd` uses the [`github.com/klauspost/compress/zstd` package](https://github.com/klauspost/compress/tree/master/zstd). +#### zstd compression levels + +When you specify `compression=zstd`, the `compression-level` parameter accepts +values from 0 to 22. BuildKit maps these values to four internal compression +levels: + +| compression-level | Internal level | Approximate zstd level | Description | +| ----------------- | -------------- | ---------------------- | ------------------------------------- | +| 0-2 | Fastest | ~1 | Fastest compression, larger file size | +| 3-6 (default) | Default | ~3 | Balanced compression and speed | +| 7-8 | Better | ~7 | Better compression, slower | +| 9-22 | Best | ~11 | Best compression, slowest | + +For example, setting `compression-level=5` and `compression-level=6` produces +the same compression output, since both map to the "Default" internal level. + ### OCI media types The `image`, `registry`, `oci` and `docker` exporters create container images. diff --git a/content/manuals/build/exporters/local-tar.md b/content/manuals/build/exporters/local-tar.md index dca9f3ab656..b2138da08f2 100644 --- a/content/manuals/build/exporters/local-tar.md +++ b/content/manuals/build/exporters/local-tar.md @@ -25,9 +25,109 @@ $ docker buildx build --output type=tar[,parameters] . The following table describes the available parameters: -| Parameter | Type | Default | Description | -| --------- | ------ | ------- | --------------------- | -| `dest` | String | | Path to copy files to | +| Parameter | Type | Default | Description | +| ---------------- | ------- | ------- | --------------------------------------------------------------------------------- | +| `dest` | String | | Path to copy files to | +| `platform-split` | Boolean | `true` | `local` exporter only. Split multi-platform outputs into platform subdirectories. | + +## Multi-platform builds with local exporter + +The `platform-split` parameter controls how multi-platform build outputs are +organized. + +Consider this Dockerfile that creates platform-specific files: + +```dockerfile +FROM busybox AS build +ARG TARGETOS +ARG TARGETARCH +RUN mkdir /out && echo foo > /out/hello-$TARGETOS-$TARGETARCH + +FROM scratch +COPY --from=build /out / +``` + +### Split by platform (default) + +By default, the local exporter creates a separate subdirectory for each +platform: + +```console +$ docker buildx build \ + --platform linux/amd64,linux/arm64 \ + --output type=local,dest=./output \ + . +``` + +This produces the following directory structure: + +```text +output/ +├── linux_amd64/ +│ └── hello-linux-amd64 +└── linux_arm64/ + └── hello-linux-arm64 +``` + +### Merge all platforms + +To merge files from all platforms into the same directory, set +`platform-split=false`: + +```console +$ docker buildx build \ + --platform linux/amd64,linux/arm64 \ + --output type=local,dest=./output,platform-split=false \ + . +``` + +This produces a flat directory structure: + +```text +output/ +├── hello-linux-amd64 +└── hello-linux-arm64 +``` + +Files from all platforms merge into a single directory. If multiple platforms +produce files with identical names, the export fails with an error. 
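+
+For example, a stage that writes the same fixed filename for every platform
+can't be merged. The following sketch (a hypothetical variant of the example
+Dockerfile above, with the `$TARGETOS-$TARGETARCH` suffix removed) fails when
+built for multiple platforms with `platform-split=false`, because every
+platform produces a file named `hello`:
+
+```dockerfile
+FROM busybox AS build
+RUN mkdir /out && echo foo > /out/hello
+
+FROM scratch
+COPY --from=build /out /
+```
+
+Keeping a platform-specific component in output filenames, as in the example
+Dockerfile above, avoids this collision.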
+ +### Single-platform builds + +Single-platform builds export directly to the destination directory without +creating a platform subdirectory: + +```console +$ docker buildx build \ + --platform linux/amd64 \ + --output type=local,dest=./output \ + . +``` + +This produces: + +```text +output/ +└── hello-linux-amd64 +``` + +To include the platform subdirectory even for single-platform builds, explicitly +set `platform-split=true`: + +```console +$ docker buildx build \ + --platform linux/amd64 \ + --output type=local,dest=./output,platform-split=true \ + . +``` + +This produces: + +```text +output/ +└── linux_amd64/ + └── hello-linux-amd64 +``` ## Further reading diff --git a/content/manuals/build/exporters/oci-docker.md b/content/manuals/build/exporters/oci-docker.md index fdd49b550b1..b9d13be064d 100644 --- a/content/manuals/build/exporters/oci-docker.md +++ b/content/manuals/build/exporters/oci-docker.md @@ -40,10 +40,12 @@ The following table describes the available parameters: | `force-compression` | `true`,`false` | `false` | Forcefully apply compression, see [compression][1] | | `oci-mediatypes` | `true`,`false` | | Use OCI media types in exporter manifests. Defaults to `true` for `type=oci`, and `false` for `type=docker`. See [OCI Media types][2] | | `annotation.` | String | | Attach an annotation with the respective `key` and `value` to the built image,see [annotations][3] | +| `rewrite-timestamp` | `true`,`false` | `false` | Rewrite the file timestamps to the `SOURCE_DATE_EPOCH` value. See [build reproducibility][4] for how to specify the `SOURCE_DATE_EPOCH` value. | [1]: _index.md#compression [2]: _index.md#oci-media-types [3]: #annotations +[4]: https://github.com/moby/buildkit/blob/master/docs/build-repro.md ## Annotations diff --git a/content/manuals/build/images/build-variables.svg b/content/manuals/build/images/build-variables.svg index 13197975fb1..07dab5f2d32 100644 --- a/content/manuals/build/images/build-variables.svg +++ b/content/manuals/build/images/build-variables.svg @@ -1,3 +1,3 @@ - Global scope# Build arguments declared here are in the global scopeARG GLOBAL_ARG="global default value"ARG VERSION="3.19"# You can't declare environment variables in the global scopeENV GLOBAL_ENV=false# GLOBAL_ARG was not redeclared in this stageRUN echo $GLOBAL_ARG# LOCAL_ARG was declared in stage-aRUN echo $LOCAL_ARGstage-bFROM --platform=$BUILDPLATFORM alpine:${VERSION} as stage-bstage-a# FROM-lines belong to the global scope and have access to global ARGsFROM alpine:${VERSION} as stage-a# Redeclaring GLOBAL_ARG without a value inherits the global defaultARG GLOBAL_ARGRUN echo $GLOBAL_ARG# ARG here this scope creates a local argumentARG LOCAL_ARG="local arg in stage-a"# Set an environment variable in this scopeENV LOCAL_ENV=true# Set an environment variable to the value of a build argumentENV MY_VAR=$LOCAL_ARGstage-c# New stage based on "stage-a"FROM stage-a AS stage-c# Arguments and variables are inherited from parent stagesRUN echo $LOCAL_ARGRUN echo $LOCAL_ENV<- prints an empty string<- prints an empty string<- prints "global default value"<- prints "local arg in stage-a"<- prints "true"ARG TARGETPLATFORM# You must redeclare pre-defined arguments to use them in a stageRUN echo $TARGETPLATFORM<- prints os/arch/variant of --platform# Pre-defined multi-platform arguments like $BUILDPLATFORM are global + Global scope# Build arguments declared here are in the global scopeARG GLOBAL_ARG="global default value"ARG VERSION="3.21"# You can't declare environment variables in the global 
scopeENV GLOBAL_ENV=false# GLOBAL_ARG was not redeclared in this stageRUN echo $GLOBAL_ARG# LOCAL_ARG was declared in stage-aRUN echo $LOCAL_ARGstage-bFROM --platform=$BUILDPLATFORM alpine:${VERSION} as stage-bstage-a# FROM-lines belong to the global scope and have access to global ARGsFROM alpine:${VERSION} as stage-a# Redeclaring GLOBAL_ARG without a value inherits the global defaultARG GLOBAL_ARGRUN echo $GLOBAL_ARG# ARG here this scope creates a local argumentARG LOCAL_ARG="local arg in stage-a"# Set an environment variable in this scopeENV LOCAL_ENV=true# Set an environment variable to the value of a build argumentENV MY_VAR=$LOCAL_ARGstage-c# New stage based on "stage-a"FROM stage-a AS stage-c# Arguments and variables are inherited from parent stagesRUN echo $LOCAL_ARGRUN echo $LOCAL_ENV<- prints an empty string<- prints an empty string<- prints "global default value"<- prints "local arg in stage-a"<- prints "true"ARG TARGETPLATFORM# You must redeclare pre-defined arguments to use them in a stageRUN echo $TARGETPLATFORM<- prints os/arch/variant of --platform# Pre-defined multi-platform arguments like $BUILDPLATFORM are global diff --git a/content/manuals/build/metadata/annotations.md b/content/manuals/build/metadata/annotations.md index 330deb81813..ee1f44a488f 100644 --- a/content/manuals/build/metadata/annotations.md +++ b/content/manuals/build/metadata/annotations.md @@ -6,6 +6,8 @@ aliases: - /build/building/annotations/ --- + + Annotations provide descriptive metadata for images. Use annotations to record arbitrary information and attach it to your image, which helps consumers and tools understand the origin, contents, and how to use the image. @@ -39,7 +41,7 @@ You can add annotations to an image at build-time, or when creating the image manifest or index. > [!NOTE] -> +> > The Docker Engine image store doesn't support loading images with > annotations. To build with annotations, make sure to push the image directly > to a registry, using the `--push` CLI flag or the @@ -68,7 +70,7 @@ For examples on how to add annotations to images built with GitHub Actions, see You can also add annotations to an image created using `docker buildx imagetools create`. This command only supports adding annotations to an index or manifest descriptors, see -[CLI reference](/reference/cli/docker/buildx/imagetools/create.md#annotation). +[CLI reference](/reference/cli/docker/buildx/imagetools/create/#annotation). ## Inspect annotations @@ -202,9 +204,9 @@ Related articles: Reference information: -- [`docker buildx build --annotation`](/reference/cli/docker/buildx/build.md#annotation) +- [`docker buildx build --annotation`](/reference/cli/docker/buildx/build/#annotation) - [Bake file reference: `annotations`](/manuals/build/bake/reference.md#targetannotations) -- [`docker buildx imagetools create --annotation`](/reference/cli/docker/buildx/imagetools/create.md#annotation) +- [`docker buildx imagetools create --annotation`](/reference/cli/docker/buildx/imagetools/create/#annotation) diff --git a/content/manuals/build/metadata/attestations/_index.md b/content/manuals/build/metadata/attestations/_index.md index e18977bf467..2d7959f3543 100644 --- a/content/manuals/build/metadata/attestations/_index.md +++ b/content/manuals/build/metadata/attestations/_index.md @@ -20,7 +20,7 @@ you to make informed decisions about how an image impacts the supply chain secur of your application. It also enables the use of policy engines for validating images based on policy rules you've defined. 
-Two types of build annotations are available: +Two types of build attestations are available: - Software Bill of Material (SBOM): list of software artifacts that an image contains, or that were used to build the image. @@ -43,39 +43,27 @@ to see if images you are already using are exposed to vulnerabilities. ## Creating attestations -When you build an image with `docker buildx build`, you can add attestation -records to the resulting image using the `--provenance` and `--sbom` options. -You can opt in to add either the SBOM or provenance attestation type, or both. - -```console -$ docker buildx build --sbom=true --provenance=true . +BuildKit generates the attestations when building the image. Provenance +attestations with the `mode=min` level are added to images by default. The +attestation records are wrapped in the in-toto JSON format and attached to the +image index in a manifest for the final image. + +You can customize attestation behavior using the `--provenance` and `--sbom` +flags: + +```bash +# Opt in to SBOM attestations: +docker buildx build --sbom=true . +# Opt in to max-level provenance attestations: +docker buildx build --provenance=mode=max . +# Opt out of provenance attestations: +docker buildx build --provenance=false . ``` -> [!NOTE] -> -> The default image store doesn't support attestations. If you're using the -> default image store and you build an image using the default `docker` driver, -> or using a different driver with the `--load` flag, the attestations are -> lost. -> -> To make sure the attestations are preserved, you can: -> -> - Use a `docker-container` driver with the `--push` flag to push the image to -> a registry directly. -> - Enable the [containerd image store](/manuals/desktop/features/containerd.md). - -> [!NOTE] -> -> Provenance attestations are enabled by default, with the `mode=min` option. -> You can disable provenance attestations using the `--provenance=false` flag, -> or by setting the [`BUILDX_NO_DEFAULT_ATTESTATIONS`](/manuals/build/building/variables.md#buildx_no_default_attestations) environment variable. -> -> Using the `--provenance=true` flag attaches provenance attestations with `mode=max` -> by default. See [Provenance attestation](./slsa-provenance.md) for more details. - -BuildKit generates the attestations when building the image. The attestation -records are wrapped in the in-toto JSON format and attached to the image -index in a manifest for the final image. +You can also disable default provenance attestations by setting the +[`BUILDX_NO_DEFAULT_ATTESTATIONS`](/manuals/build/building/variables.md#buildx_no_default_attestations) +environment variable. See [Provenance attestation](./slsa-provenance.md) for +more details about provenance modes and options. ## Storage diff --git a/content/manuals/build/metadata/attestations/sbom.md b/content/manuals/build/metadata/attestations/sbom.md index ffbd5354317..ee34dc89566 100644 --- a/content/manuals/build/metadata/attestations/sbom.md +++ b/content/manuals/build/metadata/attestations/sbom.md @@ -2,15 +2,12 @@ title: SBOM attestations keywords: build, attestations, sbom, spdx, metadata, packages description: | - SBOM build attestations describe the contents of your image, - and the packages used to build it. + SBOM attestations describe what software artifacts an image contains and the artifacts used to create the image. 
aliases: - /build/attestations/sbom/ --- -Software Bill of Materials (SBOM) attestations describe what software artifacts -an image contains, and artifacts used to create the image. Metadata included in -an SBOM for describing software artifacts may include: +SBOM attestations help ensure [software supply chain transparency](/guides/docker-scout/s3c.md) by verifying the software artifacts an image contains and the artifacts used to create the image. Metadata included in an [SBOM](/guides/docker-scout/sbom.md) for describing software artifacts may include: - Name of the artifact - Version @@ -18,14 +15,9 @@ an SBOM for describing software artifacts may include: - Authors - Unique package identifier -There are benefits to indexing contents of an image during the build, as opposed -to scanning a final image. When scanning happens as part of the build, you're -able to detect software you use to build the image, that may not show up in the -final image. +Indexing the contents of an image during the build has benefits over scanning a final image. When scanning happens as part of the build, you can detect software you used to build the image, which may not show up in the final image. -The SBOMs generated by BuildKit follow the SPDX standard. SBOMs attach to the -final image as a JSON-encoded SPDX document, using the format defined by the -[in-toto SPDX predicate](https://github.com/in-toto/attestation/blob/main/spec/predicates/spdx.md). +Docker supports SBOM generation and attestation through an SLSA-compliant build process using BuildKit and attestations. The SBOMs generated by [BuildKit](/manuals/build/buildkit/_index.md) follow the SPDX standard and attach to the final image as a JSON-encoded SPDX document, using the format defined by the [in-toto SPDX predicate](https://github.com/in-toto/attestation/blob/main/spec/predicates/spdx.md). On this page, you’ll learn how to create, manage, and verify SBOM attestations using Docker tooling. ## Create SBOM attestations @@ -175,7 +167,7 @@ sbom.spdx.json ## Inspecting SBOMs To explore created SBOMs exported through the `image` exporter, you can use -[`imagetools inspect`](/reference/cli/docker/buildx/imagetools/inspect.md). +[`imagetools inspect`](/reference/cli/docker/buildx/imagetools/inspect/). Using the `--format` option, you can specify a template for the output. All SBOM-related data is available under the `.SBOM` attribute. For example, to get @@ -210,7 +202,7 @@ base-passwd@3.5.47 ## SBOM generator -BuildKit generates the SBOM using a scanner plugin. By default, it uses is the +BuildKit generates the SBOM using a scanner plugin. By default, it uses the [BuildKit Syft scanner](https://github.com/docker/buildkit-syft-scanner) plugin. This plugin is built on top of [Anchore's Syft](https://github.com/anchore/syft), diff --git a/content/manuals/build/metadata/attestations/slsa-provenance.md b/content/manuals/build/metadata/attestations/slsa-provenance.md index f3add2da14d..54e00c78933 100644 --- a/content/manuals/build/metadata/attestations/slsa-provenance.md +++ b/content/manuals/build/metadata/attestations/slsa-provenance.md @@ -16,8 +16,10 @@ details such as: - Source code details - Materials (files, scripts) consumed during the build -Provenance attestations follow the -[SLSA provenance schema, version 0.2](https://slsa.dev/provenance/v0.2#schema). +By default, provenance attestations follow the +[SLSA provenance schema, version 0.2](https://slsa.dev/spec/v0.2/provenance#schema). 
+You can optionally enable [SLSA Provenance v1](https://slsa.dev/spec/v1.1/provenance#schema) +using [the `version` parameter](#version). For more information about how BuildKit populates these provenance properties, refer to [SLSA definitions](slsa-definitions.md). @@ -29,11 +31,12 @@ to the `docker buildx build` command: ```console $ docker buildx build --tag /: \ - --attest type=provenance,mode=[min,max] . + --attest type=provenance,mode=[min,max],version=[v0.2,v1] . ``` Alternatively, you can use the shorthand `--provenance=true` option instead of `--attest type=provenance`. -To specify the `mode` parameter using the shorthand option, use: `--provenance=mode=max`. +To specify the `mode` or `version` parameters using the shorthand option, use: +`--provenance=mode=max,version=v1`. For an example on how to add provenance attestations with GitHub Actions, see [Add attestations with GitHub Actions](/manuals/build/ci/github-actions/attestations.md). @@ -41,8 +44,8 @@ For an example on how to add provenance attestations with GitHub Actions, see ## Mode You can use the `mode` parameter to define the level of detail to be included in -the provenance attestation. Supported values are `mode=min`, and `mode=max` -(default). +the provenance attestation. Supported values are `mode=min` (default) and +`mode=max`. ### Min @@ -56,8 +59,8 @@ such as: - Build platform - Reproducibility -Values of build arguments, the identities of secrets, and rich layer metadata is -not included `mode=min`. The `min`-level provenance is safe to use for all +Values of build arguments, the identities of secrets, and rich layer metadata are +not included in `mode=min`. The `min`-level provenance is safe to use for all builds, as it doesn't leak information from any part of the build environment. The following JSON example shows the information included in a provenance @@ -143,17 +146,34 @@ detailed information for analysis. > [!WARNING] > > Note that `mode=max` exposes the values of -> [build arguments](/reference/cli/docker/buildx/build.md#build-arg). +> [build arguments](/reference/cli/docker/buildx/build/#build-arg). > > If you're misusing build arguments to pass credentials, authentication > tokens, or other secrets, you should refactor your build to pass the secrets using -> [secret mounts](/reference/cli/docker/buildx/build.md#secret) instead. +> [secret mounts](/reference/cli/docker/buildx/build/#secret) instead. > Secret mounts don't leak outside of the build and are never included in provenance attestations. +## Version + +The `version` parameter lets you specify which SLSA provenance schema version +to use. Supported values are `version=v0.2` (default) and `version=v1`. + +To use SLSA Provenance v1: + +```console +$ docker buildx build --tag /: \ + --attest type=provenance,mode=max,version=v1 . +``` + +For more information about SLSA Provenance v1, see the +[SLSA specification](https://slsa.dev/spec/v1.1/provenance). To see the +difference between SLSA v0.2 and v1 provenance attestations, refer to +[SLSA definitions](./slsa-definitions.md) + ## Inspecting Provenance To explore created Provenance exported through the `image` exporter, you can -use [`imagetools inspect`](/reference/cli/docker/buildx/imagetools/inspect.md). +use [`imagetools inspect`](/reference/cli/docker/buildx/imagetools/inspect/). Using the `--format` option, you can specify a template for the output. All provenance-related data is available under the `.Provenance` attribute. 
For @@ -175,7 +195,7 @@ extract the full source code of the Dockerfile used to build the image: ```console $ docker buildx imagetools inspect /: \ --format '{{ range (index .Provenance.SLSA.metadata "https://mobyproject.org/buildkit@v1#metadata").source.infos }}{{ if eq .filename "Dockerfile" }}{{ .data }}{{ end }}{{ end }}' | base64 -d -FROM ubuntu:20.04 +FROM ubuntu:24.04 RUN apt-get update ... ``` diff --git a/content/manuals/build/policies/_index.md b/content/manuals/build/policies/_index.md new file mode 100644 index 00000000000..5079a8e736f --- /dev/null +++ b/content/manuals/build/policies/_index.md @@ -0,0 +1,167 @@ +--- +title: Validating build inputs with policies +linkTitle: Validating builds +description: Secure your Docker builds by validating images, Git repositories, and dependencies with build policies +keywords: build policies, opa, rego, docker security, supply chain, attestations +weight: 70 +params: + sidebar: + badge: + color: blue + text: Experimental +--- + +Building with Docker often involves downloading remote resources. These +external dependencies, such as Docker images, Git repositories, remote files, +and other artifacts, are called build inputs. + +For example: + +- Pulling images from a registry +- Cloning a source code repository +- Fetching files from a server over HTTPS + +When consuming build inputs, it's a good idea to verify the contents are what +you expect them to be. One way to do this is to use the `--checksum` option for +the `ADD` Dockerfile instruction. This lets you verify the SHA256 checksum of a +remote resource when pulling it into a build: + +```dockerfile +ADD --checksum=sha256:c0ff3312345… https://example.com/archive.tar.gz / +``` + +If the remote `archive.tar.gz` file does not match the checksum that the +Dockerfile expects, the build fails. + +Checksums verify that content matches what you expect, but only for the `ADD` +instruction. They don't tell you anything about where the content came from or +how it was produced. You can't use checksums to enforce constraints like +"images must be signed" or "dependencies must come from approved sources." + +Build policies solve this problem. They let you define rules that validate all +your build inputs, enforcing requirements like provenance attestations, +approved registries, and signed Git tags across your entire build process. + +## Prerequisites + +Build policies is currently an experimental feature. To try it out, you'll +need: + +- Buildx 0.31.0 or later - Check your version: `docker buildx version` +- BuildKit 0.27.0 or later - Verify with: `docker buildx inspect --bootstrap` + +If you're using Docker Desktop, ensure you're on a version that includes these +updates. + +## Build policies + +Buildx version 0.31.0 added support for build policies. Build policies are +rules for securing your Docker build supply chain, and help protect against +upstream compromises, malicious dependencies, and unauthorized modifications to +your build inputs. + +Build policies let you enforce extended verifications on inputs used to build +your projects, such as: + +- Docker images must use digest references (not tags alone) +- Images must have provenance attestations and cosign signatures +- Git tags are signed by maintainers with a PGP public key +- All remote artifacts must use HTTPS and include a checksum for verification + +Build policies are defined in a declarative policy language, called Rego, +created for the [Open Policy Agent (OPA)](https://www.openpolicyagent.org/). 
+The following example shows a minimal build policy in Rego. + +```rego {title="Dockerfile.rego"} +package docker + +default allow := false + +# Allow any local inputs for this build +# For example: a local build context, or a local Dockerfile +allow if input.local + +# Allow images, but only if they have provenance attestations +allow if { + input.image.hasProvenance +} + +decision := {"allow": allow} +``` + +If the Dockerfile associated with this policy references an image with no +provenance attestation in a `FROM` instruction, the policy would be violated +and the build would fail. + +## How policies work + +When you run `docker buildx build`, Buildx: + +1. Resolves all build inputs (images, Git repos, HTTP downloads) +2. Looks for a policy file matching your Dockerfile name (e.g., + `Dockerfile.rego`) +3. Evaluates each input against the policy before the build starts +4. Allows the build to proceed only if all inputs pass the policy + +Policies are written in Rego (Open Policy Agent's policy language). You don't +need to be a Rego expert - the [Introduction](./intro.md) tutorial teaches you +everything needed. + +Policy files live alongside your Dockerfile: + +```text +project/ +├── Dockerfile +├── Dockerfile.rego +└── src/ +``` + +No additional configuration is needed - Buildx automatically finds and loads +the policy when you build. + +## Use cases + +Build policies help you enforce security and compliance requirements on your +Docker builds. Common scenarios where policies provide value: + +### Enforce base image standards + +Require all production Dockerfiles to use specific, approved base images with +digest references. Prevent developers from using arbitrary images that haven't +been vetted by your security team. + +### Validate third-party dependencies + +When your build downloads files, libraries, or tools from the internet, verify +they come from trusted sources and match expected checksums or signatures. This +protects against supply chain attacks where an upstream dependency is +compromised. + +### Ensure signed releases + +Require that all dependencies have valid signatures from trusted parties. + +- Check GPG signatures for Git repositories you clone in your builds +- Verify provenance attestation signatures with Sigstore + +### Meet compliance requirements + +Some regulatory frameworks require evidence that you validate your build +inputs. Build policies give you an auditable, declarative way to demonstrate +you're checking dependencies against security standards. + +### Separate development and production rules + +Apply stricter validation for production builds while allowing more flexibility +during development. The same policy file can contain conditional rules based on +build context or target. + +## Get started + +Ready to start writing policies? The [Introduction](./intro.md) tutorial walks +you through creating your first policy and teaches the Rego basics you need. + +For practical usage guidance, see [Using build policies](./usage.md). + +For practical examples you can copy and adapt, see the [Example +policies](./examples.md) library. 
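+
+As a quick illustration of enforcement, a build whose inputs violate the
+policy fails before the build runs. For example, with a policy that doesn't
+allow the referenced base image, the build is rejected with an error similar
+to the following (abridged; see [Debugging](./debugging.md) for the full
+output format):
+
+```text
+ERROR: source "docker-image://nginx:latest" not allowed by policy
+```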
diff --git a/content/manuals/build/policies/built-ins.md b/content/manuals/build/policies/built-ins.md new file mode 100644 index 00000000000..310b25c92b7 --- /dev/null +++ b/content/manuals/build/policies/built-ins.md @@ -0,0 +1,205 @@ +--- +title: Built-in functions +linkTitle: Built-in functions +description: Buildx includes built-in helper functions to make writing policies easier +keywords: build policies, built-in functions, rego functions, signature verification, policy helpers +weight: 90 +--- + +Buildx provides built-in functions, in addition to the [Rego +built-ins](#rego-built-in-functions), to extend Rego policies with +Docker-specific operations like loading local files, verifying Git signatures, +and pinning image digests. + +## Rego built-in functions + +The functions [documented on this page](#buildx-built-in-functions) are +Buildx-specific functions, distinct from [Rego's standard built-in +functions](https://www.openpolicyagent.org/docs/policy-language#built-in-functions) + +Buildx also supports standard Rego built-in functions, but only a subset. To +see the exact list of supported functions, refer to the Buildx [source +code](https://github.com/docker/buildx/blob/master/policy/builtins.go). + +## Buildx built-in functions + +Buildx provides the following custom built-in functions for policy development: + +- [`print`](#print) +- [`load_json`](#load_json) +- [`verify_git_signature`](#verify_git_signature) +- [`pin_image`](#pin_image) + +### `print` + +Outputs debug information during policy evaluation. + +Parameters: + +- Any number of values to print + +Returns: The values (pass-through) + +Example: + +```rego +allow if { + input.image.repo == "alpine" + print("Allowing alpine image:", input.image.tag) +} +``` + +Debug output appears when building with `--progress=plain`. + +### `load_json` + +Loads and parses JSON data from local files in the build context. + +Parameters: + +- `filename` (string) - Path to JSON file relative to policy directory + +Returns: Parsed JSON data as Rego value + +Example: + +```rego +# Load approved versions from external file +approved_versions = load_json("versions.json") + +allow if { + input.image.repo == "alpine" + some version in approved_versions.alpine + input.image.tag == version +} +``` + +File structure: + +```text +project/ +├── Dockerfile +├── Dockerfile.rego +└── versions.json +``` + +versions.json: + +```json +{ + "alpine": ["3.19", "3.20"], + "golang": ["1.21", "1.22"] +} +``` + +The JSON file must be in the same directory as the policy or in a +subdirectory accessible from the policy location. + +### `verify_git_signature` + +Verifies PGP signatures on Git commits or tags. 
+ +Parameters: + +- `git_object` (object) - Either `input.git.commit` or `input.git.tag` +- `keyfile` (string) - Path to PGP public key file (relative to policy + directory) + +Returns: Boolean - `true` if signature is valid, `false` otherwise + +Example: + +```rego +# Require signed Git tags +allow if { + input.git.tagName != "" + verify_git_signature(input.git.tag, "maintainer.asc") +} + +# Require signed commits +allow if { + input.git.commit + verify_git_signature(input.git.commit, "keys/team.asc") +} +``` + +Directory structure: + +```text +project/ +├── Dockerfile.rego +└── maintainer.asc # PGP public key +``` + +Or with subdirectory: + +```text +project/ +├── Dockerfile.rego +└── keys/ + ├── maintainer.asc + └── team.asc +``` + +Obtaining public keys: + +```console +$ gpg --export --armor user@example.com > maintainer.asc +``` + +### `pin_image` + +Pins an image to a specific digest, overriding the tag-based reference. Use +this to force builds to use specific image versions. + +Parameters: + +- `image_object` (object) - Must be `input.image` (the current image being + evaluated) +- `digest` (string) - Target digest in format `sha256:...` + +Returns: Boolean - `true` if pinning succeeds + +Example: + +```rego +# Pin alpine 3.19 to specific digest +alpine_3_19_digest = "sha256:4b7ce07002c69e8f3d704a9c5d6fd3053be500b7f1c69fc0d80990c2ad8dd412" + +allow if { + input.image.repo == "alpine" + input.image.tag == "3.19" + pin_image(input.image, alpine_3_19_digest) +} +``` + +Automatic digest replacement: + +```rego +# Replace old digests with patched versions +replace_map = { + "3.22.0": "3.22.2", + "3.22.1": "3.22.2", +} + +alpine_digests = { + "3.22.0": "sha256:8a1f59ffb675680d47db6337b49d22281a139e9d709335b492be023728e11715", + "3.22.2": "sha256:4b7ce07002c69e8f3d704a9c5d6fd3053be500b7f1c69fc0d80990c2ad8dd412", +} + +allow if { + input.image.repo == "alpine" + some old_version, new_version in replace_map + input.image.checksum == alpine_digests[old_version] + print("Replacing", old_version, "with", new_version) + pin_image(input.image, alpine_digests[new_version]) +} +``` + +This pattern automatically upgrades old image versions to patched releases. + +## Next steps + +- Browse complete examples: [Example policies](./examples.md) +- Learn policy development workflow: [Using build policies](./usage.md) +- Reference input fields: [Input reference](./inputs.md) diff --git a/content/manuals/build/policies/debugging.md b/content/manuals/build/policies/debugging.md new file mode 100644 index 00000000000..16253a40d88 --- /dev/null +++ b/content/manuals/build/policies/debugging.md @@ -0,0 +1,188 @@ +--- +title: Debugging build policies +linkTitle: Debugging +description: Debug policies during development with inspection and testing tools +keywords: build policies, debugging, policy troubleshooting, log-level, policy eval, rego debugging +weight: 70 +--- + +When policies don't work as expected, use the tools available to inspect policy +evaluation and understand what's happening. This guide covers the debugging +techniques and common gotchas. + +## Quick reference + +Essential debugging commands: + +```console +# See complete input data during builds (recommended) +$ docker buildx build --progress=plain --policy log-level=debug . + +# See policy checks and decisions +$ docker buildx build --progress=plain . + +# Explore input structure for different sources +$ docker buildx policy eval --print . 
+$ docker buildx policy eval --print https://github.com/org/repo.git +$ docker buildx policy eval --print docker-image://alpine:3.19 + +# Test if policy allows a source +$ docker buildx policy eval . +``` + +## Policy output with `--progress=plain` + +To see policy evaluation during builds, use `--progress=plain`: + +```console +$ docker buildx build --progress=plain . +``` + +This shows all policy checks, decisions, and `print()` output. Without +`--progress=plain`, policy evaluation is silent unless there's an error. + +```plaintext +#1 loading policies Dockerfile.rego +#1 0.010 checking policy for source docker-image://alpine:3.19 (linux/arm64) +#1 0.011 Dockerfile.rego:8: image: {"ref":"alpine:3.19","repo":"alpine","tag":"3.19"} +#1 0.012 policy decision for source docker-image://alpine:3.19: ALLOW +``` + +If a policy denies a source, you'll see: + +```text +#1 0.012 policy decision for source docker-image://nginx:latest: DENY +ERROR: source "docker-image://nginx:latest" not allowed by policy +``` + +## Debug logging + +For detailed debugging, add `--policy log-level=debug` to see the full input +JSON, unresolved fields, and policy responses: + +```console +$ docker buildx build --progress=plain --policy log-level=debug . +``` + +This shows significantly more information than the default level, including the +complete input structure for each source without needing `print()` statements +in your policy. + +Complete input JSON: + +```text +#1 0.007 policy input: { +#1 0.007 "env": { +#1 0.007 "filename": "." +#1 0.007 }, +#1 0.007 "image": { +#1 0.007 "ref": "docker.io/library/alpine:3.19", +#1 0.007 "host": "docker.io", +#1 0.007 "repo": "alpine", +#1 0.007 "fullRepo": "docker.io/library/alpine", +#1 0.007 "tag": "3.19", +#1 0.007 "platform": "linux/arm64", +#1 0.007 "os": "linux", +#1 0.007 "arch": "arm64" +#1 0.007 } +#1 0.007 } +``` + +Unresolved fields: + +```text +#1 0.007 unknowns for policy evaluation: [input.image.checksum input.image.labels input.image.user input.image.volumes input.image.workingDir input.image.env input.image.hasProvenance input.image.signatures] +``` + +Policy response: + +```text +#1 0.008 policy response: map[allow:true] +``` + +This detailed output is invaluable for understanding exactly what data your +policy receives and which fields are not yet resolved. Use debug logging when +developing policies to avoid needing extensive `print()` statements. + +## Conditional debugging with print() + +While `--policy log-level=debug` shows all input data automatically, the +`print()` function is useful for debugging specific rule logic and conditional +flows: + +```rego +allow if { + input.image + print("Checking image:", input.image.repo, "isCanonical:", input.image.isCanonical) + input.image.repo == "alpine" + input.image.isCanonical +} +``` + +Use `print()` to debug conditional logic within rules or track which rules are +evaluating. For general input inspection during development, use `--policy +log-level=debug` instead - it requires no policy modifications. + +> [!NOTE] +> Print statements only execute when their containing rule evaluates. A rule +> like `allow if { input.image; print(...) }` only prints for image inputs, +> not for Git repos, HTTP downloads, or local files. + +## Common issues + +### Full repository path or repository name + +Symptom: Policy checking repository names doesn't match as expected. 
+ +Cause: Docker Hub images use `input.image.repo` for the short name +(`"alpine"`) but `input.image.fullRepo` includes the full path +(`"docker.io/library/alpine"`). + +Solution: + +```rego +# Match just the repo name (works for Docker Hub and other registries) +allow if { + input.image + input.image.repo == "alpine" +} + +# Or match the full repository path +allow if { + input.image + input.image.fullRepo == "docker.io/library/alpine" +} +``` + +### Policy evaluation happens multiple times + +Symptom: Build output shows the same source evaluated multiple times. + +Cause: BuildKit may evaluate policies at different stages (reference +resolution, actual pull) or for different platforms. + +This is normal behavior. Policies should be idempotent (produce same result +each time for the same input). + +### Fields missing with `policy eval --print` + +Symptom: `docker buildx policy eval --print` doesn't show expected fields +like `hasProvenance`, `labels`, or `checksum`. + +Cause: `--print` shows only reference information by default, without +fetching from registries. + +Solution: Use `--fields` to fetch specific metadata fields: + +```console +$ docker buildx policy eval --print --fields image.labels docker-image://alpine:3.19 +``` + +See [Using build policies](./usage.md#testing-policies-with-policy-eval) for +details. + +## Next steps + +- See complete field reference: [Input reference](./inputs.md) +- Review example policies: [Examples](./examples.md) +- Learn policy usage patterns: [Using build policies](./usage.md) diff --git a/content/manuals/build/policies/examples.md b/content/manuals/build/policies/examples.md new file mode 100644 index 00000000000..af4edbb6af1 --- /dev/null +++ b/content/manuals/build/policies/examples.md @@ -0,0 +1,585 @@ +--- +title: Policy templates and examples +linkTitle: Templates & examples +description: Browse policy examples from quick-start configs to production-grade security templates +keywords: build policies, policy examples, rego examples, docker security, registry allowlist, policy templates +weight: 50 +--- + +This page provides complete, working policy examples you can copy and adapt. +The examples are organized into two sections: getting started policies for +quick adoption, and production templates for comprehensive security. + +If you're new to policies, start with the tutorials: +[Introduction](./intro.md), [Image validation](./validate-images.md), and [Git +validation](./validate-git.md). Those pages teach individual techniques. This +page shows complete policies combining those techniques. + +## How to use these examples + +1. Copy the policy code into a `Dockerfile.rego` file next to your + Dockerfile +2. Customize any todo comments with your specific values +3. Test by running `docker build .` and verifying the policy works as + expected +4. Refine based on your team's needs + +### Using examples with bake + +These policies work with both `docker buildx build` and `docker buildx bake`. +For bake, place the policy alongside your Dockerfile and it loads +automatically. To use additional policies: + +```hcl +target "default" { + dockerfile = "Dockerfile" + policy = ["extra.rego"] +} +``` + +See the [Usage guide](./usage.md) for complete bake integration details. + +## Getting started + +These policies work immediately with minimal or no customization. Use them to +adopt policies quickly and demonstrate value to your team. 
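+
+You can also check whether a template allows a given source without running a
+full build, using `docker buildx policy eval` (see
+[Using build policies](./usage.md#testing-policies-with-policy-eval) for
+details):
+
+```console
+$ docker buildx policy eval .
+```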
+ +### Development-friendly baseline + +A permissive policy that allows typical development workflows while blocking +obvious security issues. + +```rego +package docker + +default allow := false + +allow if input.local +allow if input.git + +# Allow common public registries +allow if { + input.image.host == "docker.io" # Docker Hub +} + +allow if { + input.image.host == "ghcr.io" # GitHub Container Registry +} + +allow if { + input.image.host == "dhi.io" # Docker Hardened Images +} + +# Require HTTPS for all downloads +allow if { + input.http.schema == "https" +} + +decision := {"allow": allow} +``` + +This policy allows local and Git contexts, images from Docker Hub, GitHub +Container Registry, and [Docker Hardened Images](/dhi/), and `ADD` downloads +over HTTPS. It blocks HTTP downloads and non-standard registries. + +When to use: Starting point for teams new to policies. Provides basic security +without disrupting development workflows. + +### Registry allowlist + +Control which registries your builds can pull images from. + +```rego +package docker + +default allow := false + +allow if input.local + +# TODO: Add your internal registry hostname +allowed_registries := ["docker.io", "ghcr.io", "dhi.io", "registry.company.com"] + +allow if { + input.image.host in allowed_registries +} + +# Allow mirrored DHI images from Docker Hub (DHI Enterprise users) +# TODO: Replace with your organization namespace +allow if { + input.image.host == "docker.io" + startswith(input.image.repo, "myorg/dhi-") +} + +deny_msg contains msg if { + not allow + input.image + msg := sprintf("registry %s is not in the allowlist", [input.image.host]) +} + +decision := {"allow": allow, "deny_msg": deny_msg} +``` + +This policy restricts image pulls to approved registries. Customize and add +your internal registry to the list. If you have a DHI Enterprise subscription +and have mirrored Docker Hardened Images to Docker Hub, add a rule to allow +images from your organization's namespace. + +When to use: Enforce corporate policies about approved image sources. Prevents +developers from using arbitrary public registries. + +### Pin base images to digests + +Require digest references for reproducible builds. + +```rego +package docker + +default allow := false + +allow if input.local + +# Require digest references for all images +allow if { + input.image.isCanonical +} + +deny_msg contains msg if { + not allow + input.image + msg := sprintf("image %s must use digest reference (e.g., @sha256:...)", [input.image.ref]) +} + +decision := {"allow": allow, "deny_msg": deny_msg} +``` + +This policy requires images use digest references like +`alpine@sha256:abc123...` instead of tags like `alpine:3.19`. Digests are +immutable - the same digest always resolves to the same image content. + +When to use: Ensure build reproducibility. Prevents builds from breaking when +upstream tags are updated. Required for compliance in some environments. + +### Control external dependencies + +Pin specific versions of dependencies downloaded during builds. 
+ +```rego +package docker + +default allow := false + +allow if input.local + +# Allow any image (add restrictions as needed) +allow if input.image + +# TODO: Add your allowed Git repositories and tags +allowed_repos := { + "https://github.com/moby/buildkit.git": ["v0.26.1", "v0.27.0"], +} +# Only allow Git input from allowed_repos +allow if { + some repo, versions in allowed_repos + input.git.remote == repo + input.git.tagName in versions +} + +# TODO: Add your allowed downloads +allow if { + input.http.url == "https://example.com/app-v1.0.tar.gz" +} + +decision := {"allow": allow} +``` + +This policy creates allowlists for external dependencies. Add your Git +repositories with approved version tags, and URLs. + +When to use: Control which external dependencies can be used in builds. +Prevents builds from pulling arbitrary versions or unverified downloads. + +## Production templates + +These templates demonstrate comprehensive security patterns. They require +customization but show best practices for production environments. + +### Image attestation and provenance + +Require images have provenance attestations from trusted builders. + +```rego +package docker + +default allow := false + +allow if input.local + +# TODO: Add your repository names +allowed_repos := ["myorg/backend", "myorg/frontend", "myorg/worker"] + +# Production images need full attestations +allow if { + some repo in allowed_repos + input.image.repo == repo + input.image.hasProvenance + some sig in input.image.signatures + trusted_github_builder(sig, repo) +} + +# Helper to validate GitHub Actions build from main branch +trusted_github_builder(sig, repo) if { + sig.signer.certificateIssuer == "CN=sigstore-intermediate,O=sigstore.dev" + sig.signer.issuer == "https://token.actions.githubusercontent.com" + startswith(sig.signer.buildSignerURI, sprintf("https://github.com/myorg/%s/.github/workflows/", [repo])) + sig.signer.sourceRepositoryRef == "refs/heads/main" + sig.signer.runnerEnvironment == "github-hosted" +} + +# Allow Docker Hardened Images with built-in attestations +allow if { + input.image.host == "dhi.io" + input.image.isCanonical + input.image.hasProvenance +} + +# Allow official base images with digests +allow if { + input.image.repo == "alpine" + input.image.host == "docker.io" + input.image.isCanonical +} + +decision := {"allow": allow} +``` + +This template validates that your application images have provenance +attestations, and were built by GitHub Actions from your main branch. Docker +Hardened Images are allowed when using digests since they include comprehensive +attestations by default. Other base images must use digests. + +Customize: + +- Replace `allowed_repos` with your image names +- Update the organization name in `trusted_github_builder()` +- Add rules for other base images you use + +When to use: Enforce supply chain security for production deployments. Ensures +images are built by trusted CI/CD pipelines with auditable provenance. + +### Signed Git releases + +Enforce signed tags from trusted maintainers for Git dependencies. 
+ +```rego +package docker + +default allow := false + +allow if input.local + +allow if input.image + +# TODO: Replace with your repository URL +is_buildkit if { + input.git.remote == "https://github.com/moby/buildkit.git" +} + +is_version_tag if { + is_buildkit + regex.match(`^v[0-9]+\.[0-9]+\.[0-9]+$`, input.git.tagName) +} + +# Version tags must be signed +allow if { + is_version_tag + input.git.tagName != "" + verify_git_signature(input.git.tag, "maintainers.asc") +} + +# Allow unsigned refs for development +allow if { + is_buildkit + not is_version_tag +} + +decision := {"allow": allow} +``` + +This template requires production release tags to be signed by trusted +maintainers. Development branches and commits can be unsigned. + +Setup: + +1. Export maintainer PGP public keys to `maintainers.asc`: + ```console + $ gpg --export --armor user1@example.com user2@example.com > maintainers.asc + ``` +2. Place `maintainers.asc` in the same directory as your policy file + +Customize: + +- Replace the repository URL in `is_buildkit` +- Update the maintainers in the PGP keyring file +- Adjust the version tag regex pattern if needed + +When to use: Validate that production dependencies come from signed releases. +Protects against compromised releases or unauthorized updates. + +### Multi-registry policy + +Apply different validation rules for internal and external registries. + +```rego +package docker + +default allow := false + +allow if input.local + +# TODO: Replace with your internal registry hostname +internal_registry := "registry.company.com" + +# Internal registry: basic validation +allow if { + input.image.host == internal_registry +} + +# External registries: strict validation +allow if { + input.image.host != internal_registry + input.image.host != "" + input.image.isCanonical + input.image.hasProvenance +} + +# Docker Hub: allowlist specific images +allow if { + input.image.host == "docker.io" + # TODO: Add your approved base images + input.image.repo in ["alpine", "golang", "node"] + input.image.isCanonical +} + +# Docker Hardened Images: trusted by default with built-in attestations +allow if { + input.image.host == "dhi.io" + input.image.isCanonical +} + +decision := {"allow": allow} +``` + +This template defines a trust boundary between internal and external image +sources. Internal images require minimal validation, while external images need +digests and provenance. Docker Hardened Images from `dhi.io` are treated as +trusted since they include comprehensive attestations and security guarantees. + +Customize: + +- Set your internal registry hostname +- Add your approved Docker Hub base images +- Adjust validation requirements based on your security policies + +When to use: Organizations with internal registries that need different rules +for internal and external sources. Balances security with practical workflow +needs. + +### Multi-environment policy + +Apply different rules based on the build target or stage. 
For example, + +```rego +package docker + +default allow := false + +allow if input.local + +# TODO: Define your environment detection logic +is_production if { + input.env.target == "production" +} + +is_development if { + input.env.target == "development" +} + +# Production: strict rules - only digest images with provenance +allow if { + is_production + input.image.isCanonical + input.image.hasProvenance +} + +# Development: permissive rules - any image +allow if { + is_development + input.image +} + +# Staging inherits production rules (default target detection) +allow if { + not is_production + not is_development + input.image.isCanonical +} + +decision := {"allow": allow} +``` + +This template uses build targets to apply different validation levels. +Production requires attestations and digests, development is permissive, and +staging uses moderate rules. + +Customize: + +- Update environment detection logic (target names, build args, etc.) +- Adjust validation requirements for each environment +- Add more environments as needed + +When to use: Teams with separate build configurations for different deployment +stages. Allows flexibility in development while enforcing strict rules for +production. + +### Complete dependency pinning + +Pin all external dependencies to specific versions across all input types. + +```rego +package docker + +default allow := false + +allow if input.local + +# TODO: Add your pinned images with exact digests +# Docker Hub images use docker.io as host +allowed_dockerhub := { + "alpine": "sha256:4b7ce07002c69e8f3d704a9c5d6fd3053be500b7f1c69fc0d80990c2ad8dd412", + "golang": "sha256:abc123...", +} + +allow if { + input.image.host == "docker.io" + some repo, digest in allowed_dockerhub + input.image.repo == repo + input.image.checksum == digest +} + +# TODO: Add your pinned DHI images +allowed_dhi := { + "python": "sha256:def456...", + "node": "sha256:ghi789...", +} + +allow if { + input.image.host == "dhi.io" + some repo, digest in allowed_dhi + input.image.repo == repo + input.image.checksum == digest +} + +# TODO: Add your pinned Git dependencies +allowed_git := { + "https://github.com/moby/buildkit.git": { + "tag": "v0.26.1", + "commit": "abc123...", + }, +} + +allow if { + some url, version in allowed_git + input.git.remote == url + input.git.tagName == version.tag + input.git.commitChecksum == version.commit +} + +# TODO: Add your pinned HTTP downloads +allowed_downloads := { + "https://releases.example.com/app-v1.0.tar.gz": "sha256:def456...", +} + +allow if { + some url, checksum in allowed_downloads + input.http.url == url + input.http.checksum == checksum +} + +decision := {"allow": allow} +``` + +This template pins every external dependency to exact versions with cryptographic +verification. Images use digests, Git repos use commit SHAs, and downloads use +checksums. + +Customize: + +- Add all your dependencies with exact versions/checksums +- Maintain this file when updating dependencies +- Consider automating updates through CI/CD + +When to use: Maximum reproducibility and security. Ensures builds always use +exact versions of all dependencies. Required for high-security or regulated +environments. + +### Manual signature verification + +Verify image signatures by inspecting signature metadata fields. 
+ +```rego +package docker + +default allow := false + +allow if input.local + +# Require valid GitHub Actions signatures +allow if { + input.image + input.image.hasProvenance + some sig in input.image.signatures + valid_github_signature(sig) +} + +# Helper function to validate GitHub Actions signature +valid_github_signature(sig) if { + # Sigstore keyless signing + sig.signer.certificateIssuer == "CN=sigstore-intermediate,O=sigstore.dev" + sig.signer.issuer == "https://token.actions.githubusercontent.com" + + # TODO: Replace with your organization + startswith(sig.signer.buildSignerURI, "https://github.com/myorg/.github/workflows/") + startswith(sig.signer.sourceRepositoryURI, "https://github.com/myorg/") + + # Verify GitHub hosted runner + sig.signer.runnerEnvironment == "github-hosted" + + # Require timestamp + count(sig.timestamps) > 0 +} + +decision := {"allow": allow} +``` + +This policy validates that images were built by GitHub Actions using Sigstore +keyless signing. + +Customize: + +- Replace `myorg` with your GitHub organization +- Adjust workflow path restrictions +- Add additional signature field checks as needed + +When to use: Enforce that images are built by CI/CD with verifiable signatures, +not manually pushed by developers. + +## Next steps + +- Write unit tests for your policies: [Test build policies](./testing.md) +- Review [Built-in functions](./built-ins.md) for signature verification and + attestation checking +- Check the [Input reference](./inputs.md) for all available fields you can + validate +- Read the tutorials for detailed explanations: + [Introduction](./intro.md), [Image validation](./validate-images.md), [Git + validation](./validate-git.md) diff --git a/content/manuals/build/policies/inputs.md b/content/manuals/build/policies/inputs.md new file mode 100644 index 00000000000..43aad88629c --- /dev/null +++ b/content/manuals/build/policies/inputs.md @@ -0,0 +1,539 @@ +--- +title: Input reference +linkTitle: Input reference +description: Reference documentation for policy input fields +keywords: build policies, input reference, policy fields, image metadata, git metadata +weight: 80 +--- + +When Buildx evaluates policies, it provides information about build inputs +through the `input` object. The structure of `input` depends on the type of +resource your Dockerfile references. + +## Input types + +Build inputs correspond to Dockerfile instructions: + +| Dockerfile instruction | Input type | Access pattern | +| --------------------------------------- | ---------- | -------------- | +| `FROM alpine:latest` | Image | `input.image` | +| `COPY --from=builder /app /app` | Image | `input.image` | +| `ADD https://example.com/file.tar.gz /` | HTTP | `input.http` | +| `ADD git@github.com:user/repo.git /src` | Git | `input.git` | +| Build context (`.`) | Local | `input.local` | + +Each input type has specific fields available for policy evaluation. + +## HTTP inputs + +HTTP inputs represent files downloaded over HTTP or HTTPS using the `ADD` +instruction. + +### Example Dockerfile + +```dockerfile +FROM alpine +ADD --checksum=sha256:abc123... https://example.com/app.tar.gz /app.tar.gz +``` + +### Available fields + +#### `input.http.url` + +The complete URL of the resource. + +```rego +allow if { + input.http.url == "https://example.com/app.tar.gz" +} +``` + +#### `input.http.schema` + +The URL scheme (`http` or `https`). 
+ +```rego +# Require HTTPS for all downloads +allow if { + input.http.schema == "https" +} +``` + +#### `input.http.host` + +The hostname from the URL. + +```rego +# Allow downloads from approved domains +allow if { + input.http.host == "cdn.example.com" +} +``` + +#### `input.http.path` + +The path component of the URL. + +```rego +allow if { + startswith(input.http.path, "/releases/") +} +``` + +#### `input.http.checksum` + +The checksum specified with `ADD --checksum=...`, if present. Empty string if +no checksum was provided. + +```rego +# Require checksums for all downloads +allow if { + input.http.checksum != "" +} +``` + +#### `input.http.hasAuth` + +Boolean indicating if the request includes authentication (HTTP basic auth or +bearer token). + +```rego +# Require authentication for internal servers +allow if { + input.http.host == "internal.company.com" + input.http.hasAuth +} +``` + +## Image inputs + +Image inputs represent container images from `FROM` instructions or +`COPY --from` references. + +### Example Dockerfile + +```dockerfile +FROM alpine:3.19@sha256:abc123... +COPY --from=builder:latest /app /app +``` + +### Available fields + +#### `input.image.ref` + +The complete image reference as written in the Dockerfile. + +```rego +allow if { + input.image.ref == "alpine:3.19@sha256:abc123..." +} +``` + +#### `input.image.host` + +The registry hostname. Docker Hub images use `"docker.io"`. + +```rego +# Only allow Docker Hub images +allow if { + input.image.host == "docker.io" +} + +# Only allow images from GitHub Container Registry +allow if { + input.image.host == "ghcr.io" +} +``` + +#### `input.image.repo` + +The repository name without the registry host. + +```rego +allow if { + input.image.repo == "library/alpine" +} +``` + +#### `input.image.fullRepo` + +The full repository path including registry host. + +```rego +allow if { + input.image.fullRepo == "docker.io/library/alpine" +} +``` + +#### `input.image.tag` + +The tag portion of the reference. Empty if using a digest reference. + +```rego +# Allow only specific tags +allow if { + input.image.tag == "3.19" +} +``` + +#### `input.image.isCanonical` + +Boolean indicating if the reference uses a digest (`@sha256:...`). + +```rego +# Require digest references +allow if { + input.image.isCanonical +} +``` + +#### `input.image.checksum` + +The SHA256 digest of the image manifest. + +```rego +allow if { + input.image.checksum == "sha256:abc123..." +} +``` + +#### `input.image.platform` + +The target platform for multi-platform images. + +```rego +allow if { + input.image.platform == "linux/amd64" +} +``` + +#### `input.image.os` + +The operating system from the image configuration. + +```rego +allow if { + input.image.os == "linux" +} +``` + +#### `input.image.arch` + +The CPU architecture from the image configuration. + +```rego +allow if { + input.image.arch == "amd64" +} +``` + +#### `input.image.hasProvenance` + +Boolean indicating if the image has provenance attestations. + +```rego +# Require provenance for production images +allow if { + input.image.hasProvenance +} +``` + +#### `input.image.labels` + +A map of image labels from the image configuration. + +```rego +# Check for specific labels +allow if { + input.image.labels["org.opencontainers.image.vendor"] == "Example Corp" +} +``` + +#### `input.image.signatures` + +Array of attestation signatures. 
Each signature in the array has the following +fields: + +- `kind`: Signature kind (e.g., `"docker-github-builder"`, `"self-signed"`) +- `type`: Signature type (e.g., `"bundle-v0.3"`, `"simplesigning-v1"`) +- `timestamps`: Trusted timestamps from transparency logs +- `dockerReference`: Docker image reference +- `isDHI`: Boolean indicating if this is a Docker Hardened Image +- `signer`: Sigstore certificate details + +```rego +# Require at least one signature +allow if { + count(input.image.signatures) > 0 +} +``` + +For Sigstore signatures, the `signer` object provides detailed certificate +information from the signing workflow: + +- `certificateIssuer`: Certificate issuer +- `subjectAlternativeName`: Subject alternative name from certificate +- `buildSignerURI`: URI of the build signer +- `buildSignerDigest`: Digest of the build signer +- `runnerEnvironment`: CI/CD runner environment +- `sourceRepositoryURI`: Source repository URL +- `sourceRepositoryDigest`: Source repository digest +- `sourceRepositoryRef`: Source repository ref (branch/tag) +- `sourceRepositoryIdentifier`: Source repository identifier +- `sourceRepositoryOwnerURI`: Repository owner URI +- `buildConfigURI`: Build configuration URI +- `buildTrigger`: What triggered the build +- `runInvocationURI`: CI/CD run invocation URI + +```rego +# Require signatures from GitHub Actions +allow if { + some sig in input.image.signatures + sig.signer.runnerEnvironment == "github-hosted" + startswith(sig.signer.sourceRepositoryURI, "https://github.com/myorg/") +} +``` + +## Git inputs + +Git inputs represent Git repositories referenced in `ADD` instructions or used +as build context. + +### Example Dockerfile + +```dockerfile +ADD git@github.com:moby/buildkit.git#v0.12.0 /src +``` + +### Available fields + +#### `input.git.schema` + +The URL scheme (`https`, `http`, `git`, or `ssh`). + +```rego +# Require HTTPS for Git clones +allow if { + input.git.schema == "https" +} +``` + +#### `input.git.host` + +The Git host (e.g., `github.com`, `gitlab.com`). + +```rego +allow if { + input.git.host == "github.com" +} +``` + +#### `input.git.remote` + +The complete Git URL. + +```rego +allow if { + input.git.remote == "https://github.com/moby/buildkit.git" +} +``` + +#### `input.git.ref` + +The Git reference. + +```rego +allow if { + input.git.ref == "refs/heads/master" +} +``` + +#### `input.git.tagName` + +The tag name if the reference is a tag. + +```rego +# Only allow version tags +allow if { + regex.match(`^v[0-9]+\.[0-9]+\.[0-9]+$`, input.git.tagName) +} +``` + +#### `input.git.branch` + +The branch name if the reference is a branch. + +```rego +allow if { + input.git.branch == "main" +} +``` + +#### `input.git.subDir` + +The subdirectory path within the repository, if specified. + +```rego +# Ensure clones are from the root +allow if { + input.git.subDir == "" +} +``` + +#### `input.git.isCommitRef` + +Boolean indicating if the reference is a commit SHA (as opposed to a branch or +tag name). + +```rego +# Require commit SHAs for production +allow if { + input.env.target == "production" + input.git.isCommitRef +} +``` + +#### `input.git.checksum` + +The checksum of the Git reference. For commit references and branches, this is +the commit hash. For annotated tags, this is the tag object hash. + +```rego +allow if { + input.git.checksum == "abc123..." +} +``` + +#### `input.git.commitChecksum` + +The commit hash that the reference points to. For annotated tags, this differs +from `checksum` (which is the tag object hash). 
For commit references and +branches, this is the same as `checksum`. + +```rego +allow if { + input.git.commitChecksum == "abc123..." +} +``` + +#### `input.git.isAnnotatedTag` + +Boolean indicating if the reference is an annotated tag (as opposed to a +lightweight tag). + +```rego +# Require annotated tags +allow if { + input.git.tagName != "" + input.git.isAnnotatedTag +} +``` + +#### `input.git.commit` + +Object containing commit metadata: + +- `author`: Author name, email, when +- `committer`: Committer name, email, when +- `message`: Commit message +- `pgpSignature`: PGP signature details if signed +- `sshSignature`: SSH signature details if signed + +```rego +# Check commit author +allow if { + input.git.commit.author.email == "maintainer@example.com" +} +``` + +#### `input.git.tag` + +Object containing tag metadata for annotated tags: + +- `tagger`: Tagger name, email, when +- `message`: Tag message +- `pgpSignature`: PGP signature details if signed +- `sshSignature`: SSH signature details if signed + +```rego +# Require signed tags +allow if { + input.git.tag.pgpSignature != null +} +``` + +## Local inputs + +Local inputs represent the build context directory. + +### Available fields + +#### `input.local.name` + +The name or path of the local context. + +```rego +allow if { + input.local.name == "." +} +``` + +Local inputs are typically less restricted than remote inputs, but you can +still write policies to enforce context requirements. + +## Environment fields + +The `input.env` object provides build configuration information set by user on +invoking the build, not specific to a resource type. + +### Available fields + +#### `input.env.filename` + +The name of the Dockerfile being built. + +```rego +# Stricter rules for production Dockerfile +allow if { + input.env.filename == "Dockerfile" + input.image.isCanonical +} + +# Relaxed rules for development +allow if { + input.env.filename == "Dockerfile.dev" +} +``` + +#### `input.env.target` + +The build target from multi-stage builds. + +```rego +# Require signing only for release builds +allow if { + input.env.target == "release" + input.git.tagName != "" + verify_git_signature(input.git.tag, "maintainer.asc") +} +``` + +#### `input.env.args` + +Build arguments passed with `--build-arg`. Access specific arguments by key. + +```rego +# Check build argument values +allow if { + input.env.args.ENVIRONMENT == "production" + input.image.hasProvenance +} +``` + +## Next steps + +- See [Built-in functions](./built-ins.md) for built-in helper functions to + check and validate input properties +- Browse [Example policies](./examples.md) for common patterns +- Read about [Rego](https://www.openpolicyagent.org/docs/latest/policy-language/) + for advanced policy logic diff --git a/content/manuals/build/policies/intro.md b/content/manuals/build/policies/intro.md new file mode 100644 index 00000000000..ab27f47b835 --- /dev/null +++ b/content/manuals/build/policies/intro.md @@ -0,0 +1,334 @@ +--- +title: Introduction to build policies +linkTitle: Introduction +description: Get started with writing and evaluating build policies +keywords: build policies, opa, rego, policy tutorial, docker build, security +weight: 10 +--- + +Build policies let you validate the inputs to your Docker builds before they +run. This tutorial walks you through creating your first policy, teaching the +Rego basics you need along the way. 
+ +## What you'll learn + +By the end of this tutorial, you'll understand: + +- How to create and organize policy files +- Basic Rego syntax and patterns +- How to write policies that validate URLs, checksums, and images +- How policies evaluate during builds + +## Prerequisites + +- Buildx version 0.31 or later +- Basic familiarity with Dockerfiles and building images + +## How policies work + +When you build an image, Buildx resolves all the inputs your +Dockerfile references: base images from `FROM` instructions, files +from `ADD` or `COPY` or build contexts, and Git repositories. Before +running the build, Buildx evaluates your policies against these +inputs. If any input violates a policy, the build fails before any +instructions execute. + +Policies are written in Rego, a declarative language designed for expressing +rules and constraints. You don't need to know Rego to get started - this +tutorial teaches you what you need. + +## Create your first policy + +Create a new directory for this tutorial and add a Dockerfile: + +```console +$ mkdir policy-tutorial +$ cd policy-tutorial +``` + +Create a `Dockerfile` that downloads a file with `ADD`: + +```dockerfile +FROM scratch +ADD https://example.com/index.html /index.html +``` + +Now create a policy file. Policies use the `.rego` extension and live alongside +your Dockerfile. Create `Dockerfile.rego`: + +```rego {title="Dockerfile.rego"} +package docker + +default allow := false + +allow if input.local +allow if { + input.http.host == "example.com" +} + +decision := {"allow": allow} +``` + +Save this file as `Dockerfile.rego` in the same directory as your Dockerfile. + +Let's break down what this policy does: + +- `package docker` - All build policies must start with this package declaration +- `default allow := false` - This example uses a deny-by-default rule: if inputs do not match an `allow` rule, the policy check fails +- `allow if input.local` - The first rule allows any local files (your build context) +- `allow if { input.http.host == "example.com" }` - The second rule allows HTTP downloads from `example.com` +- `decision := {"allow": allow}` - The final decision object tells Buildx whether to allow or deny the input + +This policy says: "Only allow local files and HTTP downloads from +`example.com`". Rego evaluates all the policy rules to figure out the return +value for the `decision` variable, for each build input. The evaluations happen +in parallel and on-demand; the order of the policy rules has no significance. + +### About `input.local` + +You'll see `allow if input.local` in nearly every policy. This rule allows +local file access, which includes your build context (typically, the `.` +directory) and importantly, the Dockerfile itself. Without this rule, Buildx +can't read your Dockerfile to start the build. + +Even builds that don't reference any files from the build context often need +`input.local` because the Dockerfile is a local file. The policy evaluates +before the build starts, and denying local access means denying access to the +Dockerfile. + +In rare cases, you might want stricter local file policies - for example, in CI +builds where the build context uses a Git URL as a context directly. In these +cases, you may want to deny local sources to prevent untracked files or changes +from making their way into your build. + +## Automatic policy loading + +Buildx automatically loads policies that match your Dockerfile name. 
When you +build with `Dockerfile`, Buildx looks for `Dockerfile.rego` in the same +directory. For a file named `app.Dockerfile`, it looks for +`app.Dockerfile.rego`. + +This automatic loading means you don't need any command-line flags in most +cases - create the policy file and build. + +The policy file must be in the same directory as the Dockerfile. If Buildx +can't find a matching policy, the build proceeds without policy evaluation +(unless you use `--policy strict=true`). + +For more control over policy loading, see the [Usage guide](./usage.md). + +## Run a build with your policy + +Build the image with policy evaluation enabled: + +```console +$ docker build . +``` + +The build succeeds because the URL in your Dockerfile matches the policy. Now +try changing the URL in your Dockerfile to something else: + +```dockerfile +FROM scratch +ADD https://api.github.com/users/octocat /user.json +``` + +Build again: + +```console +$ docker build . +``` + +This time the build fails with a policy violation. The `api.github.com` +hostname doesn't match the rule in your policy, so Buildx rejects it before +running any build steps. + +## Debugging policy failures + +If your build fails with a policy violation, use `--progress=plain` to see +exactly what went wrong: + +```console +$ docker buildx build --progress=plain . +``` + +This shows all policy checks, the input data for each source, and allow/deny +decisions. For complete debugging guidance, see [Debugging](./debugging.md). + +## Add helpful error messages + +When a policy denies an input, users see a generic error message. You can +provide custom messages that explain why the build was denied: + +```rego {title="Dockerfile.rego"} +package docker + +default allow := false + +allow if input.local +allow if { + input.http.host == "example.com" + input.http.schema == "https" +} + +deny_msg contains msg if { + not allow + input.http + msg := "only HTTPS downloads from example.com are allowed" +} + +decision := {"allow": allow, "deny_msg": deny_msg} +``` + +Now when a build is denied, users see your custom message explaining what went +wrong: + +```console +$ docker buildx build . +Policy: only HTTPS downloads from example.com are allowed +ERROR: failed to build: ... source not allowed by policy +``` + +The `deny_msg` rule uses `contains` to add messages to a set. You can add +multiple deny messages for different failure conditions to help users understand +exactly what needs to change. + +## Understand Rego rules + +Rego policies are built from rules. A rule defines when something is allowed. +The basic pattern is: + +```rego +allow if { + condition_one + condition_two + condition_three +} +``` + +All conditions must be true for the rule to match. Think of it as an AND +operation - the URL must match AND the checksum must match AND any other +conditions you specify. + +You can have multiple `allow` rules in one policy. If any rule matches, the +input is allowed: + +```rego +# Allow downloads from example.com +allow if { + input.http.host == "example.com" +} + +# Also allow downloads from api.github.com +allow if { + input.http.host == "api.github.com" +} +``` + +This works like OR - the input can match the first rule OR the second rule. + +## Access input fields + +The `input` object gives you access to information about build inputs. 
The structure depends on the input type:

- `input.http` - Files downloaded with `ADD https://...`
- `input.image` - Container images from `FROM` or `COPY --from`
- `input.git` - Git repositories from `ADD git://...` or build context
- `input.local` - Local file context

Refer to the [Input reference](./inputs.md) for all available input fields.

For HTTP downloads, you can access:

| Key                 | Description                        | Example                          |
| ------------------- | ---------------------------------- | -------------------------------- |
| `input.http.url`    | The full URL                       | `https://example.com/index.html` |
| `input.http.schema` | The protocol (HTTP/HTTPS)          | `https`                          |
| `input.http.host`   | The hostname                       | `example.com`                    |
| `input.http.path`   | The URL path, including parameters | `/index.html`                    |

Update your policy to require HTTPS:

```rego
package docker

default allow := false

allow if {
    input.http.host == "example.com"
    input.http.schema == "https"
}

decision := {"allow": allow}
```

Now the policy requires both the hostname to be `example.com` and the protocol
to be HTTPS. HTTP URLs (without TLS) would fail the policy check.

## Pattern matching and strings

Rego provides [built-in functions] for pattern matching. Use `startswith()` to
match URL prefixes:

[built-in functions]: https://www.openpolicyagent.org/docs/policy-language#built-in-functions

```rego
allow if {
    startswith(input.http.url, "https://example.com/")
}
```

This allows any URL that starts with `https://example.com/`.

Use `regex.match()` for complex patterns:

```rego
allow if {
    regex.match(`^https://example\.com/.+\.json$`, input.http.url)
}
```

This matches URLs that:

- Start with `https://example.com/`
- End with `.json`
- Have at least one character between the domain and extension

## Policy file location

Policy files live adjacent to the Dockerfile they validate, using the naming
pattern `<Dockerfile name>.rego`:

```text
project/
├── Dockerfile              # Main Dockerfile
├── Dockerfile.rego         # Policy for Dockerfile
├── lint.Dockerfile         # Linting Dockerfile
└── lint.Dockerfile.rego    # Policy for lint.Dockerfile
```

When you build, Buildx automatically loads the corresponding policy file:

```console
$ docker buildx build -f Dockerfile .         # Loads Dockerfile.rego
$ docker buildx build -f lint.Dockerfile .    # Loads lint.Dockerfile.rego
```

## Next steps

You now understand how to write basic build policies for HTTP resources.
To +continue learning: + +- Apply and test policies: [Using build policies](./usage.md) +- Learn [Image validation](./validate-images.md) to validate container images + from `FROM` instructions +- Learn [Git validation](./validate-git.md) to validate Git repositories used + in builds +- See [Example policies](./examples.md) for copy-paste-ready policies covering + common scenarios +- Write unit tests for your policies: [Test build policies](./testing.md) +- Debug policy failures: [Debugging](./debugging.md) +- Read the [Input reference](./inputs.md) for all available input fields +- Check the [Built-in functions](./built-ins.md) for signature verification, + attestations, and other security checks diff --git a/content/manuals/build/policies/testing.md b/content/manuals/build/policies/testing.md new file mode 100644 index 00000000000..300f2d3741f --- /dev/null +++ b/content/manuals/build/policies/testing.md @@ -0,0 +1,214 @@ +--- +title: Test build policies +linkTitle: Testing +description: Write and run unit tests for build policies, similar to the opa test command +keywords: build policies, opa, rego, testing, unit tests, policy validation +weight: 60 +--- + +The [`docker buildx policy test`](/reference/cli/docker/buildx/policy/test/) +command runs unit tests for build policies using OPA's [standard test +framework](https://www.openpolicyagent.org/docs/policy-testing). + +```console +$ docker buildx policy test +``` + +This validates policy logic with mocked inputs. + +For testing against real sources (actual image metadata, Git repositories), use +[`docker buildx policy eval`](/reference/cli/docker/buildx/policy/eval/) +instead. You can use the `eval --print` option to resolve input for a specific +source for writing a test case. + +## Basic example + +Start with a simple policy that only allows `alpine` images: + +```rego {title="Dockerfile.rego"} +package docker + +default allow = false + +allow if { + input.image.repo == "alpine" +} + +decision := {"allow": allow} +``` + +Create a test file with the `*_test.rego` suffix. Test functions must start +with `test_`: + +```rego {title="Dockerfile_test.rego"} +package docker + +test_alpine_allowed if { + decision.allow with input as {"image": {"repo": "alpine"}} +} + +test_ubuntu_denied if { + not decision.allow with input as {"image": {"repo": "ubuntu"}} +} +``` + +Run the tests: + +```console +$ docker buildx policy test . +test_alpine_allowed: PASS (allow=true) +test_ubuntu_denied: PASS (allow=false) +``` + +`PASS` indicates that the tests defined in `Dockerfile_test.rego` executed +successfully and all assertions were satisfied. + +## Command options + +Filter tests by name with `--run`: + +```console +$ docker buildx policy test --run alpine . +test_alpine_allowed: PASS (allow=true) +``` + +Test policies with non-default filenames using `--filename`: + +```console +$ docker buildx policy test --filename app.Dockerfile . +``` + +This loads `app.Dockerfile.rego` and runs `*_test.rego` files against it. 
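You can also combine the two flags. As a sketch (this assumes you have tests whose
names contain `registry` written against the `app.Dockerfile` policy), the following
would run only those tests:

```console
$ docker buildx policy test --filename app.Dockerfile --run registry .
```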
+ +## Test output + +Passed tests show the allow status and any deny messages: + +```console +test_alpine_allowed: PASS (allow=true) +test_ubuntu_denied: PASS (allow=false, deny_msg=only alpine images are allowed) +``` + +Failed tests show input, decision output, and missing fields: + +```console +test_invalid: FAIL (allow=false) +input: + { + "image": {} + } +decision: + { + "allow": false, + "deny_msg": [ + "only alpine images are allowed" + ] + } +missing_input: input.image.repo +``` + +## Test deny messages + +To test custom error messages, capture the full decision result and assert on +the `deny_msg` field. + +For a policy with deny messages: + +```rego {title="Dockerfile.rego"} +package docker + +default allow = false + +allow if { + input.image.repo == "alpine" +} + +deny_msg contains msg if { + not allow + msg := "only alpine images are allowed" +} + +decision := {"allow": allow, "deny_msg": deny_msg} +``` + +Test the deny message: + +```rego {title="Dockerfile_test.rego"} +test_deny_message if { + result := decision with input as {"image": {"repo": "ubuntu"}} + not result.allow + "only alpine images are allowed" in result.deny_msg +} +``` + +## Test patterns + +**Test environment-specific rules:** + +```rego +test_production_requires_digest if { + decision.allow with input as { + "env": {"target": "production"}, + "image": {"isCanonical": true} + } +} + +test_development_allows_tags if { + decision.allow with input as { + "env": {"target": "development"}, + "image": {"isCanonical": false} + } +} +``` + +**Test multiple registries:** + +```rego +test_dockerhub_allowed if { + decision.allow with input as { + "image": { + "ref": "docker.io/library/alpine", + "host": "docker.io", + "repo": "alpine" + } + } +} + +test_ghcr_allowed if { + decision.allow with input as { + "image": { + "ref": "ghcr.io/myorg/myapp", + "host": "ghcr.io", + "repo": "myorg/myapp" + } + } +} +``` + +For available input fields, see the [Input reference](./inputs.md). + +## Organize test files + +The test runner discovers all `*_test.rego` files recursively: + +```plaintext +build-policies/ +├── Dockerfile.rego +├── Dockerfile_test.rego +└── tests/ + ├── registries_test.rego + ├── signatures_test.rego + └── environments_test.rego +``` + +Run all tests: + +```console +$ docker buildx policy test . +``` + +Or test specific files: + +```console +$ docker buildx policy test tests/registries_test.rego +``` diff --git a/content/manuals/build/policies/usage.md b/content/manuals/build/policies/usage.md new file mode 100644 index 00000000000..c7ba7561611 --- /dev/null +++ b/content/manuals/build/policies/usage.md @@ -0,0 +1,493 @@ +--- +title: Using build policies +linkTitle: Usage +description: Apply policies to builds and develop policies iteratively +keywords: build policies, policy eval, docker buildx, policy development, debugging +weight: 20 +--- + +Build policies validate inputs before builds execute. This guide covers how to +develop policies iteratively and apply them to real builds with `docker buildx +build` and `docker buildx bake`. + +## Prerequisites + +- Buildx 0.31.0 or later - Check your version: `docker buildx version` +- BuildKit 0.26.0 or later - Verify with: `docker buildx inspect + --bootstrap` + +If you're using Docker Desktop, ensure you're on a version that includes these +updates. + +## Policy development workflow + +Buildx automatically loads policies that match your Dockerfile name. When you +build with `Dockerfile`, Buildx looks for `Dockerfile.rego` in the same +directory. 
For a file named `app.Dockerfile`, it looks for +`app.Dockerfile.rego`. See the [Advanced: Policy configuration](#advanced-policy-configuration) +section for configuration options and manual policy loading. + +Writing policies is an iterative process: + +1. Start with a basic deny-all policy. +2. Build with debug logging to see what inputs your Dockerfile uses. +3. Add rules to allow specific sources based on the debug output. +4. Test and refine. + +### Viewing inputs from your Dockerfile + +To see the inputs that your Dockerfile references (images, Git repos, HTTP +downloads), build with debug logging: + +```console +$ docker buildx build --progress=plain --policy log-level=debug . +``` + +Example output for an image source: + +```text +#1 0.010 checking policy for source docker-image://alpine:3.19 (linux/arm64) +#1 0.011 policy input: { +#1 0.011 "env": { +#1 0.011 "filename": "." +#1 0.011 }, +#1 0.011 "image": { +#1 0.011 "ref": "docker.io/library/alpine:3.19", +#1 0.011 "host": "docker.io", +#1 0.011 "repo": "alpine", +#1 0.011 "tag": "3.19", +#1 0.011 "platform": "linux/arm64" +#1 0.011 } +#1 0.011 } +#1 0.011 unknowns for policy evaluation: [input.image.checksum input.image.labels ...] +#1 0.012 policy decision for source docker-image://alpine:3.19: ALLOW +``` + +This shows the complete input structure, which fields are unresolved, and the +policy decision for each source. See [Input reference](./inputs.md) for all +available fields. + +### Testing policies with policy eval + +Use [`docker buildx policy eval`](/reference/cli/docker/buildx/policy/eval/) to +test whether your policy allows a specific source without running a full build. + +Note: `docker buildx policy eval` tests the source specified as the argument. +It doesn't parse your Dockerfile to evaluate all inputs - for that, [build with +--progress=plain](#viewing-inputs-from-your-dockerfile). + +Test if your policy allows the local context: + +```console +$ docker buildx policy eval . +``` + +No output means the policy allowed the source. If denied, you see: + +```console +ERROR: policy denied +``` + +Test other sources: + +```console +$ docker buildx policy eval https://example.com # Test HTTP +$ docker buildx policy eval https://github.com/org/repo.git # Test Git +``` + +By default, `--print` shows reference information parsed from the source string +(like `repo`, `tag`, `host`) without fetching from registries. To inspect +metadata that requires fetching the source (like `labels`, `checksum`, or +`hasProvenance`), specify which fields to fetch with `--fields`: + +```console +$ docker buildx policy eval --print --fields image.labels docker-image://alpine:3.19 +``` + +Multiple fields can be specified as a comma-separated list. + +### Iterative development example + +Here's a practical workflow for developing policies: + +1. Start with basic deny-all policy: + + ```rego {title="Dockerfile.rego"} + package docker + + default allow := false + + allow if input.local + + decision := {"allow": allow} + ``` + +2. Build with debug logging to see what inputs your Dockerfile uses: + + ```console + $ docker buildx build --progress=plain --policy log-level=debug . + ``` + + The output shows the denied image and its input structure: + + ```text + #1 0.026 checking policy for source docker-image://docker.io/library/alpine:3.19 + #1 0.027 policy input: { + #1 0.027 "image": { + #1 0.027 "repo": "alpine", + #1 0.027 "tag": "3.19", + #1 0.027 ... 
+ #1 0.027 } + #1 0.027 } + #1 0.028 policy decision for source docker-image://alpine:3.19: DENY + #1 ERROR: source "docker-image://alpine:3.19" not allowed by policy + ``` + +3. Add a rule allowing the alpine image: + + ```rego + allow if { + input.image.repo == "alpine" + } + ``` + +4. Build again to verify the policy works: + + ```console + $ docker buildx build . + ``` + +If it fails, see [Debugging](./debugging.md) for troubleshooting guidance. + +## Using policies with `docker build` + +Once you've developed and tested your policy, apply it to real builds. + +### Basic usage + +Create a policy alongside your Dockerfile: + +```dockerfile {title="Dockerfile"} +FROM alpine:3.19 +RUN echo "hello" +``` + +```rego {title="Dockerfile.rego"} +package docker + +default allow := false + +allow if input.local + +allow if { + input.image.repo == "alpine" +} + +decision := {"allow": allow} +``` + +Build normally: + +```console +$ docker buildx build . +``` + +Buildx loads the policy automatically and validates the `alpine:3.19` image +before building. + +### Build with different Dockerfile names + +Specify the Dockerfile with `-f`: + +```console +$ docker buildx build -f app.Dockerfile . +``` + +Buildx looks for `app.Dockerfile.rego` in the same directory. + +### Build with manual policy + +Add an extra policy to the automatic one: + +```console +$ docker buildx build --policy filename=extra-checks.rego . +``` + +Both `Dockerfile.rego` (automatic) and `extra-checks.rego` (manual) must pass. + +### Build without automatic policy + +Use only your specified policy: + +```console +$ docker buildx build --policy reset=true,filename=strict.rego . +``` + +## Using policies with bake + +[Bake](/build/bake/) supports automatic policy loading just like `docker buildx +build`. Place `Dockerfile.rego` alongside your Dockerfile and run: + +```console +$ docker buildx bake +``` + +### Manual policy in bake files + +Specify additional policies in your `docker-bake.hcl`: + +```hcl {title="docker-bake.hcl"} +target "default" { + dockerfile = "Dockerfile" + policy = ["extra.rego"] +} +``` + +The `policy` attribute takes a list of policy files. Bake loads these in +addition to the automatic `Dockerfile.rego` (if it exists). + +### Multiple policies in bake + +```hcl {title="docker-bake.hcl"} +target "webapp" { + dockerfile = "Dockerfile" + policy = [ + "shared/base-policy.rego", + "security/image-signing.rego" + ] +} +``` + +All policies must pass for the target to build successfully. + +### Different policies per target + +Apply different validation rules to different targets: + +```hcl {title="docker-bake.hcl"} +target "development" { + dockerfile = "dev.Dockerfile" + policy = ["policies/permissive.rego"] +} + +target "production" { + dockerfile = "prod.Dockerfile" + policy = ["policies/strict.rego", "policies/signing-required.rego"] +} +``` + +Build with the appropriate target: + +```console +$ docker buildx bake development # Uses permissive policy +$ docker buildx bake production # Uses strict policies +``` + +### Bake with policy options + +Currently, bake doesn't support policy options (reset, strict, disabled) in the +HCL file. Use command-line flags instead: + +```console +$ docker buildx bake --policy disabled=true production +``` + +## Testing in CI/CD + +Validate policies in continuous integration by running builds with the `--policy` flag. For unit testing policies before running builds, see [Test build policies](./testing.md). 
+ +Test policies during CI builds: + +```yaml {title=".github/workflows/test-policies.yml"} +name: Test Build Policies +on: [push, pull_request] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@{{% param "checkout_action_version" %}} + - uses: docker/setup-buildx-action@{{% param "setup_buildx_action_version" %}} + - name: Test build with policy + run: docker buildx build --policy strict=true . +``` + +This ensures policy changes don't break builds and that new rules work as +intended. The `strict=true` flag fails the build if policies aren't loaded (for +example, if the BuildKit instance used by the build is too old and doesn't +support policies). + +## Advanced: Policy configuration + +This section covers advanced policy loading mechanisms and configuration +options. + +### Automatic policy loading + +Buildx automatically loads policies that match your Dockerfile name. When you +build with `Dockerfile`, Buildx looks for `Dockerfile.rego` in the same +directory. For a file named `app.Dockerfile`, it looks for +`app.Dockerfile.rego`. + +```text +project/ +├── Dockerfile +├── Dockerfile.rego # Loaded automatically for Dockerfile +├── app.Dockerfile +├── app.Dockerfile.rego # Loaded automatically for app.Dockerfile +└── src/ +``` + +This automatic loading means you don't need command-line flags in most cases. +Create the policy file alongside your Dockerfile and build: + +```console +$ docker buildx build . +``` + +Buildx detects `Dockerfile.rego` and evaluates it before running the build. + +> [!NOTE] +> Policy files must be in the same directory as the Dockerfile they validate. +> Buildx doesn't search parent directories or subdirectories. + +### When policies don't load + +If buildx can't find a matching `.rego` file, the build proceeds without policy +evaluation. To require policies and fail if none are found, use strict mode: + +```console +$ docker buildx build --policy strict=true . +``` + +This fails the build if no policy loads or if the BuildKit daemon doesn't +support policies. + +### Manual policy configuration + +The `--policy` flag lets you specify additional policies, override automatic +loading, or control policy behavior. + +Basic syntax: + +```console +$ docker buildx build --policy filename=custom.rego . +``` + +This loads `custom.rego` in addition to the automatic `Dockerfile.rego` (if it +exists). + +Multiple policies: + +```console +$ docker buildx build --policy filename=policy1.rego --policy filename=policy2.rego . +``` + +All policies must pass for the build to succeed. Use this to enforce layered +requirements (base policy + project-specific rules). + +Available options: + +| Option | Description | Example | +| ------------------- | ------------------------------------------------------- | ----------------------------- | +| `filename=` | Load policy from specified file | `filename=custom.rego` | +| `reset=true` | Ignore automatic policies, use only specified ones | `reset=true` | +| `disabled=true` | Disable all policy evaluation | `disabled=true` | +| `strict=true` | Fail if BuildKit doesn't support policies | `strict=true` | +| `log-level=` | Control policy logging (error, warn, info, debug, none). Use `debug` to see complete input JSON and unresolved fields | `log-level=debug` | + +Combine options with commas: + +```console +$ docker buildx build --policy filename=extra.rego,strict=true . 
+``` + +### Exploring sources with policy eval + +The `docker buildx policy eval` command lets you quickly explore and test +sources without running a build. + +#### Inspect input structure with --print + +Use `--print` to see the input structure for any source without running policy +evaluation: + +```console +$ docker buildx policy eval --print https://github.com/moby/buildkit.git +``` + +```json +{ + "git": { + "schema": "https", + "host": "github.com", + "remote": "https://github.com/moby/buildkit.git" + } +} +``` + +Test different source types: + +```console +# HTTP downloads +$ docker buildx policy eval --print https://releases.hashicorp.com/terraform/1.5.0/terraform.zip + +# Images (requires docker-image:// prefix) +$ docker buildx policy eval --print docker-image://alpine:3.19 + +# Local context +$ docker buildx policy eval --print . +``` + +Shows information parsed from the source without fetching. Use `--fields` to +fetch specific metadata (see [above](#testing-policies-with-policy-eval)). + +#### Test with specific policy files + +The `--filename` flag specifies which policy file to load by providing the base +Dockerfile name (without the `.rego` extension). This is useful for testing +sources against policies associated with different Dockerfiles. + +For example, to test a source against the policy for `app.Dockerfile`: + +```console +$ docker buildx policy eval --filename app.Dockerfile . +``` + +This loads `app.Dockerfile.rego` and tests whether it allows the source `.` +(the local directory). The flag defaults to `Dockerfile` if not specified. + +Test different sources against your policy: + +```console +$ docker buildx policy eval --filename app.Dockerfile https://github.com/org/repo.git +$ docker buildx policy eval --filename app.Dockerfile docker-image://alpine:3.19 +``` + +### Reset automatic loading + +To use only your specified policies and ignore automatic `.rego` files: + +```console +$ docker buildx build --policy reset=true,filename=custom.rego . +``` + +This skips `Dockerfile.rego` and loads only `custom.rego`. + +### Disable policies temporarily + +Disable policy evaluation for testing or emergencies: + +```console +$ docker buildx build --policy disabled=true . +``` + +The build proceeds without any policy checks. Use this carefully - you're +bypassing security controls. + +## Next steps + +- Write unit tests for your policies: [Test build policies](./testing.md) +- Debug policy failures: [Debugging](./debugging.md) +- Browse working examples: [Example policies](./examples.md) +- Reference all input fields: [Input reference](./inputs.md) diff --git a/content/manuals/build/policies/validate-git.md b/content/manuals/build/policies/validate-git.md new file mode 100644 index 00000000000..98ebf6a52e7 --- /dev/null +++ b/content/manuals/build/policies/validate-git.md @@ -0,0 +1,431 @@ +--- +title: Validating Git repositories +linkTitle: Git validation +description: Write policies to validate Git repositories used in your builds +keywords: build policies, git validation, git signatures, gpg, signed commits, signed tags +weight: 40 +--- + +Git repositories often appear in Docker builds as source code inputs. The `ADD` +instruction can clone repositories, and build contexts can reference Git URLs. +Validating these inputs ensures you're building from trusted sources with +verified versions. + +This guide teaches you to write policies that validate Git inputs, from basic +version pinning to verifying signed commits and tags. 
+ +## Prerequisites + +You should understand the policy basics from the [Introduction](./intro.md): +creating policy files, basic Rego syntax, and how policies evaluate during +builds. + +## What are Git inputs? + +Git inputs come from `ADD` instructions that reference Git repositories: + +```dockerfile +# Clone a specific tag +ADD https://github.com/moby/buildkit.git#v0.26.1 /buildkit + +# Clone a branch +ADD https://github.com/user/repo.git#main /src + +# Clone a commit +ADD https://github.com/user/repo.git#abcde123 /src +``` + +The build context can also be a Git repository when you build with: + +```console +$ docker build https://github.com/user/repo.git#main +``` + +Each Git reference triggers a policy evaluation. Your policy can inspect +repository URLs, validate versions, check commit metadata, and verify +signatures. + +## Match specific repositories + +The simplest Git policy restricts which repositories can be used: + +```rego {title="Dockerfile.rego"} +package docker + +default allow := false + +allow if input.local + +allow if { + input.git.host == "github.com" + input.git.remote == "https://github.com/moby/buildkit.git" +} + +decision := {"allow": allow} +``` + +This policy: + +- Denies all inputs by default +- Allows local build context +- Allows only the BuildKit repository from GitHub + +The `host` field contains the Git server hostname, and `remote` contains the +full repository URL. Test it: + +```dockerfile {title="Dockerfile"} +FROM scratch +ADD https://github.com/moby/buildkit.git#v0.26.1 / +``` + +```console +$ docker build . +``` + +The build succeeds. Try a different repository and it fails. + +You can match multiple repositories with additional rules: + +```rego +allow if { + input.git.host == "github.com" + input.git.remote == "https://github.com/moby/buildkit.git" +} + +allow if { + input.git.host == "github.com" + input.git.remote == "https://github.com/docker/cli.git" +} + +decision := {"allow": allow} +``` + +## Pin to specific versions + +Tags and branches can change over time. Pin to specific versions to ensure +reproducible builds: + +```rego +package docker + +default allow := false + +allow if input.local + +allow if { + input.git.remote == "https://github.com/moby/buildkit.git" + input.git.tagName == "v0.26.1" +} + +decision := {"allow": allow} +``` + +The `tagName` field contains the tag name when the Git reference points to a +tag. 
Use `branch` for branches: + +```rego +allow if { + input.git.remote == "https://github.com/user/repo.git" + input.git.branch == "main" +} +``` + +Or use `ref` for any type of reference (branch, tag, or commit SHA): + +```rego +allow if { + input.git.ref == "v0.26.1" +} +``` + +## Use version allowlists + +For repositories you trust but want to control versions, maintain an allowlist: + +```rego +package docker + +default allow := false + +allowed_versions = [ + {"tag": "v0.26.1", "annotated": true, "sha": "abc123"}, +] + +is_buildkit if { + input.git.remote == "https://github.com/moby/buildkit.git" +} + +allow if { + not is_buildkit +} + +allow if { + is_buildkit + some version in allowed_versions + input.git.tagName == version.tag + input.git.isAnnotatedTag == version.annotated + startswith(input.git.commitChecksum, version.sha) +} + +decision := {"allow": allow} +``` + +This policy: + +- Defines an allowlist of approved versions with metadata +- Uses a helper rule (`is_buildkit`) for readability +- Allows all non-BuildKit inputs +- For BuildKit, checks the tag name, whether it's an annotated tag, and the commit SHA against the allowlist + +The helper rule makes complex policies more maintainable. You can expand the +allowlist as new versions are approved: + +```rego +allowed_versions = [ + {"tag": "v0.26.1", "annotated": true, "sha": "abc123"}, + {"tag": "v0.27.0", "annotated": true, "sha": "def456"}, + {"tag": "v0.27.1", "annotated": true, "sha": "789abc"}, +] +``` + +## Validate with regex patterns + +Use pattern matching for semantic versioning: + +```rego +package docker + +default allow := false + +allow if input.local + +allow if { + input.git.remote == "https://github.com/moby/buildkit.git" + regex.match(`^v[0-9]+\.[0-9]+\.[0-9]+$`, input.git.tagName) +} + +decision := {"allow": allow} +``` + +This allows any BuildKit tag matching the pattern `vX.Y.Z` where X, Y, and Z +are numbers. The regex ensures you're using release versions, not pre-release +tags like `v0.26.0-rc1`. + +Match major versions: + +```rego +# Only allow v0.x releases +allow if { + input.git.remote == "https://github.com/moby/buildkit.git" + regex.match(`^v0\.[0-9]+\.[0-9]+$`, input.git.tagName) +} +``` + +## Inspect commit metadata + +The `commit` object provides detailed information about commits: + +```rego +package docker + +default allow := false + +allow if input.local + +# Check commit author +allow if { + input.git.remote == "https://github.com/user/repo.git" + input.git.commit.author.email == "trusted@example.com" +} + +decision := {"allow": allow} +``` + +The `commit` object includes: + +- `author.name`: Author's name +- `author.email`: Author's email +- `author.when`: When the commit was authored +- `committer.name`: Committer's name +- `committer.email`: Committer's email +- `committer.when`: When the commit was committed +- `message`: Commit message + +Validate commit messages: + +```rego +allow if { + input.git.commit + contains(input.git.commit.message, "Signed-off-by:") +} +``` + +Pin to specific commit SHA: + +```rego +allow if { + input.git.commitChecksum == "abc123def456..." +} +``` + +## Require signed commits + +GPG-signed commits prove authenticity. Check for commit signatures: + +```rego +package docker + +default allow := false + +allow if input.local + +allow if { + input.git.remote == "https://github.com/moby/buildkit.git" + input.git.commit.pgpSignature != null +} + +decision := {"allow": allow} +``` + +The `pgpSignature` field is `null` for unsigned commits. 
For signed commits, it contains signature details.

SSH signatures work similarly:

```rego
allow if {
    input.git.commit.sshSignature != null
}
```

## Require signed tags

Annotated tags can be signed, providing a cryptographic guarantee of the
release:

```rego
package docker

default allow := false

allow if input.local

allow if {
    input.git.remote == "https://github.com/moby/buildkit.git"
    input.git.tag.pgpSignature != null
}

decision := {"allow": allow}
```

The `tag` object is only available for annotated tags. It includes:

- `tagger.name`: Who created the tag
- `tagger.email`: Tagger's email
- `tagger.when`: When the tag was created
- `message`: Tag message
- `pgpSignature`: PGP signature (if signed)
- `sshSignature`: SSH signature (if signed)

Lightweight tags don't have a `tag` object, so this policy effectively requires
annotated, signed tags.

## Verify signatures with public keys

Use the `verify_git_signature()` function to cryptographically verify Git
signatures against trusted public keys:

```rego
package docker

default allow := false

allow if input.local

allow if {
    input.git.remote == "https://github.com/moby/buildkit.git"
    input.git.tagName != ""
    verify_git_signature(input.git.tag, "keys.asc")
}

decision := {"allow": allow}
```

This verifies that Git tags are signed by keys in the `keys.asc` public
key file. To set this up:

1. Export maintainer public keys:
   ```console
   $ curl https://github.com/user.gpg > keys.asc
   ```
2. Place `keys.asc` alongside your policy file

The function verifies PGP signatures on commits or tags. See [Built-in
functions](./built-ins.md) for more details.

## Apply conditional rules

Use different rules for different contexts. Allow unsigned refs during
development but require signing for production:

```rego
package docker

default allow := false

allow if input.local

is_buildkit if {
    input.git.remote == "https://github.com/moby/buildkit.git"
}

is_version_tag if {
    is_buildkit
    regex.match(`^v[0-9]+\.[0-9]+\.[0-9]+$`, input.git.tagName)
}

# Version tags must be signed
allow if {
    is_version_tag
    input.git.tagName != ""
    verify_git_signature(input.git.tag, "keys.asc")
}

# Non-version refs allowed in development
allow if {
    is_buildkit
    not is_version_tag
    input.env.target != "release"
}

decision := {"allow": allow}
```

This policy:

- Defines helper rules for readability
- Requires signed version tags from maintainers
- Allows unsigned refs (branches, commits) unless building the release target
- Uses `input.env.target` to detect the build target

Build a development target without signatures:

```console
$ docker buildx build --target=dev .
```

Build the release target, and signing is enforced:

```console
$ docker buildx build --target=release .
```

## Next steps

You now understand how to validate Git repositories in build policies.
To +continue learning: + +- Browse [Example policies](./examples.md) for complete policy patterns +- Read [Built-in functions](./built-ins.md) for Git signature verification + functions +- Check the [Input reference](./inputs.md) for all available Git fields diff --git a/content/manuals/build/policies/validate-images.md b/content/manuals/build/policies/validate-images.md new file mode 100644 index 00000000000..3f4b4dd951a --- /dev/null +++ b/content/manuals/build/policies/validate-images.md @@ -0,0 +1,424 @@ +--- +title: Validating image inputs +linkTitle: Image validation +description: Write policies to validate container images used in your builds +keywords: build policies, image validation, docker images, provenance, attestations, signatures +weight: 30 +--- + +Container images are the most common build inputs. Every `FROM` instruction +pulls an image, and `COPY --from` references pull additional images. Validating +these images protects your build supply chain from compromised registries, +unexpected updates, and unauthorized base images. + +This guide teaches you to write policies that validate image inputs, +progressing from basic allowlisting to advanced attestation checks. + +## Prerequisites + +You should understand the policy basics from the [Introduction](./intro.md): +creating policy files, basic Rego syntax, and how policies evaluate during +builds. + +## What are image inputs? + +Image inputs come from two Dockerfile instructions: + +```dockerfile +# FROM instructions +FROM alpine:3.22 +FROM golang:1.25-alpine AS builder + +# COPY --from references +COPY --from=builder /app /app +COPY --from=nginx:latest /etc/nginx/nginx.conf /nginx.conf +``` + +Each of these references triggers a policy evaluation. Your policy can inspect +image metadata, verify attestations, and enforce constraints before the build +proceeds. + +## Allowlist specific repositories + +The simplest image policy restricts which repositories can be used. This +prevents developers from using arbitrary images that haven't been vetted. + +Create a policy that only allows Alpine: + +```rego {title="Dockerfile.rego"} +package docker + +default allow := false + +allow if input.local + +allow if { + input.image.repo == "alpine" +} + +decision := {"allow": allow} +``` + +This policy: + +- Denies all inputs by default +- Allows local build context +- Allows any image from the `alpine` repository (any tag or digest) + +Test it with a Dockerfile: + +```dockerfile {title="Dockerfile"} +FROM alpine +RUN echo "hello" +``` + +```console +$ docker build . +``` + +The build succeeds. Try changing to `FROM ubuntu`: + +```console +$ docker build . +``` + +The build fails because `ubuntu` doesn't match the allowed repository. + +## Compare semantic versions + +Restrict images to specific version ranges using Rego's `semver` functions: + +```rego +package docker + +default allow := false + +allow if input.local + +# Allow Go 1.21 or newer +allow if { + input.image.repo == "golang" + semver.is_valid(input.image.tag) + semver.compare(input.image.tag, "1.21.0") >= 0 +} + +decision := {"allow": allow} +``` + +The `semver.compare(a, b)` function compares semantic versions and returns: + +- `-1` if version `a` is less than `b` +- `0` if versions are equal +- `1` if version `a` is greater than `b` + +Use `semver.is_valid()` to check if a tag is a valid semantic version before +comparing. 
+ +Restrict to specific version ranges: + +```rego +allow if { + input.image.repo == "node" + version := input.image.tag + semver.is_valid(version) + semver.compare(version, "20.0.0") >= 0 # 20.0.0 or newer + semver.compare(version, "21.0.0") < 0 # older than 21.0.0 +} +``` + +This allows only Node.js 20.x versions. The pattern works for any image using +semantic versioning. + +These `semver` functions are standard Rego built-ins documented in the [OPA +policy +reference](https://www.openpolicyagent.org/docs/latest/policy-reference/#semver). + +## Require digest references + +Tags like `alpine:3.22` can change - someone could push a new image with the +same tag. Digests like `alpine@sha256:abc123...` are immutable. + +### Requiring users to provide digests + +You can require that users always specify digests in their Dockerfiles: + +```rego +package docker + +default allow := false + +allow if input.local + +allow if { + input.image.isCanonical +} + +decision := {"allow": allow} +``` + +The `isCanonical` field is `true` when the user's reference includes a digest. +This policy would allow: + +```dockerfile +FROM alpine@sha256:4b7ce07002c69e8f3d704a9c5d6fd3053be500b7f1c69fc0d80990c2ad8dd412 +``` + +But reject tag-only references like `FROM alpine:3.22`. + +### Pinning to specific digests + +Alternatively (or additionally), you can validate that an image's actual digest +matches a specific value, regardless of how the user wrote the reference: + +```rego +allow if { + input.image.repo == "alpine" + input.image.checksum == "sha256:4b7ce07002c69e8f3d704a9c5d6fd3053be500b7f1c69fc0d80990c2ad8dd412" +} + +decision := {"allow": allow} +``` + +This checks the actual content digest of the pulled image. It would allow both: + +```dockerfile +FROM alpine:3.22 +FROM alpine@sha256:4b7ce... +``` + +As long as the resolved image has the specified digest. This is useful for +pinning critical base images to known-good versions. + +## Restrict registries + +Control which registries your builds can pull from. This helps enforce +corporate policies or restrict to trusted sources. + +```rego +package docker + +default allow := false + +allow if input.local + +# Allow Docker Hub images +allow if { + input.image.host == "docker.io" # Docker Hub + input.image.repo == "alpine" +} + +# Allow images from internal registry +allow if { + input.image.host == "registry.company.com" +} + +decision := {"allow": allow} +``` + +The `host` field contains the registry hostname. Docker Hub images use +`"docker.io"` as the host value. Test with: + +```dockerfile +FROM alpine # Allowed (Docker Hub) +FROM registry.company.com/myapp:latest # Allowed (company registry) +FROM ghcr.io/someorg/image:latest # Denied (wrong registry) +``` + +Use `fullRepo` when you need the complete path including registry: + +```rego +allow if { + input.image.fullRepo == "docker.io/library/alpine" +} +``` + +## Validate platform constraints + +Multi-architecture images support different operating systems and CPU +architectures. You can restrict builds to specific platforms: + +```rego +package docker + +default allow := false + +allow if input.local + +allow if { + input.image.os == "linux" + input.image.arch in ["amd64", "arm64"] +} + +decision := {"allow": allow} +``` + +This policy: + +- Defines supported architectures in a list +- Checks `input.image.os` matches Linux +- Verifies `input.image.arch` is in the supported list + +The `os` and `arch` fields come from the image manifest, reflecting the actual +image platform. 
This works with Docker's automatic platform selection - +policies validate what Buildx resolves, not what you specify. + +## Inspect image metadata + +Images contain metadata like environment variables, labels, and working +directories. You can validate these to ensure images meet requirements. + +Check for specific environment variables: + +```rego +package docker + +default allow := false + +allow if input.local + +allow if { + input.image.repo == "golang" + input.image.workingDir == "/go" + some ver in input.image.env + startswith(ver, "GOLANG_VERSION=") + some toolchain in input.image.env + toolchain == "GOTOOLCHAIN=local" +} + +decision := {"allow": allow} +``` + +This policy validates the official Go image by checking: + +- The working directory is `/go` +- The environment has `GOLANG_VERSION` set +- The environment includes `GOTOOLCHAIN=local` + +The `input.image.env` field is an array of strings in `KEY=VALUE` format. +Use Rego's `some` iteration to search the array. + +Check image labels: + +```rego +allow if { + input.image.labels["org.opencontainers.image.vendor"] == "Example Corp" + input.image.labels["org.opencontainers.image.version"] != "" +} +``` + +The `labels` field is a map, so you access values with bracket notation. + +## Require attestations and provenance + +Modern images include [attestations](/build/metadata/attestations/): +machine-readable metadata about how the image was built. +[Provenance](/build/metadata/attestations/slsa-provenance/) attestations +describe the build process, and [SBOMs](/build/metadata/attestations/sbom/) +list the software inside. + +Require provenance: + +```rego +package docker + +default allow := false + +allow if input.local + +allow if { + input.image.hasProvenance +} + +decision := {"allow": allow} +``` + +The `hasProvenance` field is `true` when the image has provenance or SBOM +[attestations](../metadata/attestations/_index.md). + +## Verify GitHub Actions signatures + +For images built with GitHub Actions, verify they came from trusted workflows by +inspecting signature metadata: + +```rego +allow if { + input.image.repo == "myapp" + input.image.hasProvenance + some sig in input.image.signatures + valid_github_signature(sig) +} + +# Helper to validate GitHub Actions signature +valid_github_signature(sig) if { + sig.signer.certificateIssuer == "CN=sigstore-intermediate,O=sigstore.dev" + sig.signer.issuer == "https://token.actions.githubusercontent.com" + startswith(sig.signer.buildSignerURI, "https://github.com/myorg/") + sig.signer.runnerEnvironment == "github-hosted" +} + +decision := {"allow": allow} +``` + +This pattern works with any GitHub Actions workflow using Sigstore keyless +signing. The signature metadata provides cryptographic proof of the build's +origin. For complete signature verification examples, see [Example +policies](./examples.md). + +## Combine multiple checks + +Real policies often combine several checks. 
Multiple conditions in one `allow` +rule means AND - all must be true: + +```rego +package docker + +default allow := false + +allow if input.local + +# Production images need everything +allow if { + input.image.repo == "alpine" + input.image.isCanonical + input.image.hasProvenance +} + +decision := {"allow": allow} +``` + +Multiple `allow` rules means OR - any rule can match: + +```rego +package docker + +default allow := false + +allow if input.local + +# Allow Alpine with strict checks +allow if { + input.image.repo == "alpine" + input.image.isCanonical +} + +# Allow Go with different checks +allow if { + input.image.repo == "golang" + input.image.workingDir == "/go" +} + +decision := {"allow": allow} +``` + +Use this pattern to apply different requirements to different base images. + +## Next steps + +You now understand how to validate container images in build policies. To +continue learning: + +- Learn [Git repository validation](./validate-git.md) for source code inputs +- Browse [Example policies](./examples.md) for complete policy patterns +- Read [Built-in functions](./built-ins.md) for signature verification and + attestation checking +- Check the [Input reference](./inputs.md) for all available image fields diff --git a/content/manuals/build/release-notes.md b/content/manuals/build/release-notes.md index 14a60eef2e7..4b7b8e74532 100644 --- a/content/manuals/build/release-notes.md +++ b/content/manuals/build/release-notes.md @@ -1,1372 +1,7 @@ --- title: Build release notes weight: 120 -description: Learn about the new features, bug fixes, and breaking changes for the newest Buildx release -keywords: build, buildx, buildkit, release notes -tags: [Release notes] -toc_max: 2 ---- - -This page contains information about the new features, improvements, and bug -fixes in [Docker Buildx](https://github.com/docker/buildx). - -## 0.23.0 - -{{< release-date date="2025-04-15" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.23.0). - -### New - -- New `buildx history export` command allows exporting the build record into a bundle that can be imported to [Docker Desktop](/desktop/). [docker/buildx#3073](https://github.com/docker/buildx/pull/3073) - -### Enhancements - -- New `--local` and `--filter` flags allow filtering history records in `buildx history ls`. [docker/buildx#3091](https://github.com/docker/buildx/pull/3091) -- Compose compatibility has been updated to v2.6.0. [docker/buildx#3080](https://github.com/docker/buildx/pull/3080), [docker/buildx#3105](https://github.com/docker/buildx/pull/3105) -- Support CLI environment variables in standalone mode. [docker/buildx#3087](https://github.com/docker/buildx/pull/3087) - -### Bug fixes - -- Fix `--print` output for Bake producing output with unescaped variables that could cause build errors later. [docker/buildx#3097](https://github.com/docker/buildx/pull/3097) -- Fix `additional_contexts` field not working correctly when pointing to another service. [docker/buildx#3090](https://github.com/docker/buildx/pull/3090) -- Fix empty validation block crashing the Bake HCL parser. [docker/buildx#3101](https://github.com/docker/buildx/pull/3101) - -## 0.22.0 - -{{< release-date date="2025-03-18" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.22.0). 
- -### New - -- New command `buildx history import` lets you import build records into Docker Desktop for further debugging in the [Build UI](/desktop/use-desktop/builds/). This command requires [Docker Desktop](/desktop/) to be installed. [docker/buildx#3039](https://github.com/docker/buildx/pull/3039) - -### Enhancements - -- History records can now be opened by offset from the latest in `history inspect`, `history logs` and `history open` commands (e.g. `^1`). [docker/buildx#3049](https://github.com/docker/buildx/pull/3049), [docker/buildx#3055](https://github.com/docker/buildx/pull/3055) -- Bake now supports the `+=` operator to append when using `--set` for overrides. [docker/buildx#3031](https://github.com/docker/buildx/pull/3031) -- Docker container driver adds GPU devices to the container if available. [docker/buildx#3063](https://github.com/docker/buildx/pull/3063) -- Annotations can now be set when using overrides with Bake. [docker/buildx#2997](https://github.com/docker/buildx/pull/2997) -- NetBSD binaries are now included in the release. [docker/buildx#2901](https://github.com/docker/buildx/pull/2901) -- The `inspect` and `create` commands now return an error if a node fails to boot. [docker/buildx#3062](https://github.com/docker/buildx/pull/3062) - -### Bug fixes - -- Fix double pushing with Docker driver when the containerd image store is enabled. [docker/buildx#3023](https://github.com/docker/buildx/pull/3023) -- Fix multiple tags being pushed for `imagetools create` command. Now only the final manifest pushes by tag. [docker/buildx#3024](https://github.com/docker/buildx/pull/3024) - -## 0.21.0 - -{{< release-date date="2025-02-19" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.21.0). - -### New - -- New command `buildx history trace` lets you inspect traces of a build in a Jaeger UI-based viewer and compare one trace with another. [docker/buildx#2904](https://github.com/docker/buildx/pull/2904) - -### Enhancements - -- The history inspection command `buildx history inspect` now supports custom formatting with `--format` flag and JSON formatting for machine-readable output. [docker/buildx#2964](https://github.com/docker/buildx/pull/2964) -- Support for CDI device entitlement in build and bake. [docker/buildx#2994](https://github.com/docker/buildx/pull/2994) -- Supported CDI devices are now shown in the builder inspection. [docker/buildx#2983](https://github.com/docker/buildx/pull/2983) -- When using [GitHub Cache backend `type=gha`](cache/backends/gha.md), the URL for the Version 2 or API is now read from the environment and sent to BuildKit. Version 2 backend requires BuildKit v0.20.0 or later. [docker/buildx#2983](https://github.com/docker/buildx/pull/2983), [docker/buildx#3001](https://github.com/docker/buildx/pull/3001) - -### Bug fixes - -- Avoid unnecessary warnings and prompts when using `--progress=rawjson`. [docker/buildx#2957](https://github.com/docker/buildx/pull/2957) -- Fix regression with debug shell sometimes not working correctly on `--on=error`. [docker/buildx#2958](https://github.com/docker/buildx/pull/2958) -- Fix possible panic errors when using an unknown variable in the Bake definition. [docker/buildx#2960](https://github.com/docker/buildx/pull/2960) -- Fix invalid duplicate output on JSON format formatting of `buildx ls` command. 
[docker/buildx#2970](https://github.com/docker/buildx/pull/2970) -- Fix bake handling cache imports with CSV string containing multiple registry references. [docker/buildx#2944](https://github.com/docker/buildx/pull/2944) -- Fix issue where error from pulling BuildKit image could be ignored. [docker/buildx#2988](https://github.com/docker/buildx/pull/2988) -- Fix race on pausing progress on debug shell. [docker/buildx#3003](https://github.com/docker/buildx/pull/3003) - -## 0.20.1 - -{{< release-date date="2025-01-23" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.20.1). - -### Bug fixes - -- Fix `bake --print` output after missing some attributes for attestations. [docker/buildx#2937](https://github.com/docker/buildx/pull/2937) -- Fix allowing comma-separated image reference strings for cache import and export values. [docker/buildx#2944](https://github.com/docker/buildx/pull/2944) - -## 0.20.0 - -{{< release-date date="2025-01-20" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.20.0). - -> [!NOTE] -> -> This version of buildx enables filesystem entitlement checks for `buildx bake` -> command by default. If your Bake definition needs to read or write files -> outside your current working directory, you need to allow access to these -> paths with `--allow fs=`. On the terminal, you can also interactively -> approve these paths with the provided prompt. Optionally, you can disable -> these checks by setting `BUILDX_BAKE_ENTITLEMENTS_FS=0`. This validation -> produced a warning in Buildx v0.19.0+, but starting from current release it -> produces an error. For more information, see the [reference documentation](/reference/cli/docker/buildx/bake.md#allow). - -### New - -- New `buildx history` command has been added that allows working with build records of completed and running builds. You can use these commands to list, inspect, remove your builds, replay the logs of already completed builds, and quickly open your builds in Docker Desktop Build UI for further debugging. This is an early version of this command and we expect to add more features in the future releases. [#2891](https://github.com/docker/buildx/pull/2891), [#2925](https://github.com/docker/buildx/pull/2925) - -### Enhancements - -- Bake: Definition now supports new object notation for the fields that previously required CSV strings as inputs (`attest`, `output`, `cache-from`, `cache-to`, `secret`, `ssh`). [docker/buildx#2758](https://github.com/docker/buildx/pull/2758), [docker/buildx#2848](https://github.com/docker/buildx/pull/2848), [docker/buildx#2871](https://github.com/docker/buildx/pull/2871), [docker/buildx#2814](https://github.com/docker/buildx/pull/2814) -- Bake: Filesystem entitlements now error by default. To disable this behavior, you can set `BUILDX_BAKE_ENTITLEMENTS_FS=0`. [docker/buildx#2875](https://github.com/docker/buildx/pull/2875) -- Bake: Infer Git authentication token from remote files to build request. [docker/buildx#2905](https://github.com/docker/buildx/pull/2905) -- Bake: Add support for `--list` flag to list targets and variables. [docker/buildx#2900](https://github.com/docker/buildx/pull/2900), [docker/buildx#2907](https://github.com/docker/buildx/pull/2907) -- Bake: Update lookup order for default definition files to load the files with "override" suffix later. 
[docker/buildx#2886](https://github.com/docker/buildx/pull/2886) - -### Bug fixes - -- Bake: Fix entitlements check for default SSH socket. [docker/buildx#2898](https://github.com/docker/buildx/pull/2898) -- Bake: Fix missing default target in group's default targets. [docker/buildx#2863](https://github.com/docker/buildx/pull/2863) -- Bake: Fix named context from target platform matching. [docker/buildx#2877](https://github.com/docker/buildx/pull/2877) -- Fix missing documentation for quiet progress mode. [docker/buildx#2899](https://github.com/docker/buildx/pull/2899) -- Fix missing last progress from loading layers. [docker/buildx#2876](https://github.com/docker/buildx/pull/2876) -- Validate BuildKit configuration before creating a builder. [docker/buildx#2864](https://github.com/docker/buildx/pull/2864) - -### Packaging - -- Compose compatibility has been updated to v2.4.7. [docker/buildx#2893](https://github.com/docker/buildx/pull/2893), [docker/buildx#2857](https://github.com/docker/buildx/pull/2857), [docker/buildx#2829](https://github.com/docker/buildx/pull/2829) - -## 0.19.1 - -{{< release-date date="2024-11-27" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.19.1). - -### Bug fixes - -- Reverted the change in v0.19.0 that added new object notation for the fields - that previously required CSV strings in Bake definition. This enhancement was - reverted because of backwards incompatibility issues were discovered in some - edge cases. This feature has now been postponed to the v0.20.0 release. - [docker/buildx#2824](https://github.com/docker/buildx/pull/2824) - -## 0.19.0 - -{{< release-date date="2024-11-27" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.19.0). - -### New - -- Bake now requires you to allow filesystem entitlements when your build needs - to read or write files outside of your current working directory. - [docker/buildx#2796](https://github.com/docker/buildx/pull/2796), - [docker/buildx#2812](https://github.com/docker/buildx/pull/2812). - - To allow filesystem entitlements, use the `--allow fs.read=` flag for - the `docker buildx bake` command. - - This feature currently only reports a warning when using a local Bake - definition, but will start to produce an error starting from the v0.20 - release. To enable the error in the current release, you can set - `BUILDX_BAKE_ENTITLEMENTS_FS=1`. - -### Enhancements - -- Bake definition now supports new object notation for the fields that previously required CSV strings as inputs. [docker/buildx#2758](https://github.com/docker/buildx/pull/2758) - - > [!NOTE] - > This enhancement was reverted in [v0.19.1](#0191) due to a bug. - -- Bake definition now allows defining validation conditions to variables. [docker/buildx#2794](https://github.com/docker/buildx/pull/2794) -- Metadata file values can now contain JSON array values. [docker/buildx#2777](https://github.com/docker/buildx/pull/2777) -- Improved error messages when using an incorrect format for labels. [docker/buildx#2778](https://github.com/docker/buildx/pull/2778) -- FreeBSD and OpenBSD artifacts are now included in the release. [docker/buildx#2774](https://github.com/docker/buildx/pull/2774), [docker/buildx#2775](https://github.com/docker/buildx/pull/2775), [docker/buildx#2781](https://github.com/docker/buildx/pull/2781) - -### Bug fixes - -- Fixed an issue with printing Bake definitions containing empty Compose networks. 
[docker/buildx#2790](https://github.com/docker/buildx/pull/2790). - -### Packaging - -- Compose support has been updated to v2.4.4. [docker/buildx#2806](https://github.com/docker/buildx/pull/2806) [docker/buildx#2780](https://github.com/docker/buildx/pull/2780). - -## 0.18.0 - -{{< release-date date="2024-10-31" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.18.0). - -### New - -- The `docker buildx inspect` command now displays BuildKit daemon configuration options set with a TOML file. [docker/buildx#2684](https://github.com/docker/buildx/pull/2684) -- The `docker buildx ls` command output is now more compact by default by compacting the platform list. A new `--no-trunc` option can be used for the full list. [docker/buildx#2138](https://github.com/docker/buildx/pull/2138), [docker/buildx#2717](https://github.com/docker/buildx/pull/2717) -- The `docker buildx prune` command now supports new `--max-used-space` and `--min-free-space` filters with BuildKit v0.17.0+ builders. [docker/buildx#2766](https://github.com/docker/buildx/pull/2766) - -### Enhancements - -- Allow capturing of CPU and memory profiles with `pprof` using the [`BUILDX_CPU_PROFILE`](/manuals/build/building/variables.md#buildx_cpu_profile) and [`BUILDX_MEM_PROFILE`](/manuals/build/building/variables.md#buildx_mem_profile) environment variables. [docker/buildx#2746](https://github.com/docker/buildx/pull/2746) -- Maximum Dockerfile size from standard input has increased. [docker/buildx#2716](https://github.com/docker/buildx/pull/2716), [docker/buildx#2719](https://github.com/docker/buildx/pull/2719) -- Memory allocations have been reduced. [docker/buildx#2724](https://github.com/docker/buildx/pull/2724), [docker/buildx#2713](https://github.com/docker/buildx/pull/2713) -- The `--list-targets` and `--list-variables` flags for `docker buildx bake` no longer require initialization of the builder. [docker/buildx#2763](https://github.com/docker/buildx/pull/2763) - -### Bug fixes - -- Check warnings now print the full filepath to the offending Dockerfile, relative to the current working directory. [docker/buildx#2672](https://github.com/docker/buildx/pull/2672) -- Fallback images for the `--check` and `--call` options have been updated to correct references. [docker/buildx#2705](https://github.com/docker/buildx/pull/2705) -- Fix issue with the build details link not showing in experimental mode. [docker/buildx#2722](https://github.com/docker/buildx/pull/2722) -- Fix validation issue with invalid target linking for Bake. [docker/buildx#2700](https://github.com/docker/buildx/pull/2700) -- Fix missing error message when running an invalid command. [docker/buildx#2741](https://github.com/docker/buildx/pull/2741) -- Fix possible false warnings for local state in `--call` requests. [docker/buildx#2754](https://github.com/docker/buildx/pull/2754) -- Fix potential issues with entitlements when using linked targets in Bake. [docker/buildx#2701](https://github.com/docker/buildx/pull/2701) -- Fix possible permission issues when accessing local state after running Buildx with `sudo`. [docker/buildx#2745](https://github.com/docker/buildx/pull/2745) - -### Packaging - -- Compose compatibility has been updated to v2.4.1. [docker/buildx#2760](https://github.com/docker/buildx/pull/2760) - -## 0.17.1 - -{{< release-date date="2024-09-13" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.17.1). 
- -### Bug fixes - -- Do not set `network.host` entitlement flag automatically on builder creation - for the `docker-container` and `kubernetes` drivers if the entitlement is set - in the [BuildKit configuration file](/manuals/build/buildkit/toml-configuration.md). [docker/buildx#2685] -- Do not print the `network` field with `docker buildx bake --print` when empty. [docker/buildx#2689] -- Fix telemetry socket path under WSL2. [docker/buildx#2698] - -[docker/buildx#2685]: https://github.com/docker/buildx/pull/2685 -[docker/buildx#2689]: https://github.com/docker/buildx/pull/2689 -[docker/buildx#2698]: https://github.com/docker/buildx/pull/2698 - -## 0.17.0 - -{{< release-date date="2024-09-10" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.17.0). - -### New - -- Add `basename`, `dirname` and `sanitize` functions to Bake. [docker/buildx#2649] -- Enable support for Bake entitlements to allow privileged operations during builds. [docker/buildx#2666] - -### Enhancements - -- Introduce CLI metrics tracking for Bake commands. [docker/buildx#2610] -- Add `--debug` to all build commands. Previously, it was only available on the top-level `docker` and `docker buildx` commands. [docker/buildx#2660] -- Allow builds from stdin for multi-node builders. [docker/buildx#2656] -- Improve `kubernetes` driver initialization. [docker/buildx#2606] -- Include target name in the error message when building multiple targets with Bake. [docker/buildx#2651] -- Optimize metrics handling to reduce performance overhead during progress tracking. [docker/buildx#2641] -- Display the number of warnings after completing a rule check. [docker/buildx#2647] -- Skip build ref and provenance metadata for frontend methods. [docker/buildx#2650] -- Add support for setting network mode in Bake files (HCL and JSON). [docker/buildx#2671] -- Support the `--metadata-file` flag when set along the `--call` flag. [docker/buildx#2640] -- Use shared session for local contexts used by multiple Bake targets. [docker/buildx#2615], [docker/buildx#2607], [docker/buildx#2663] - -### Bug fixes - -- Improve memory management to avoid unnecessary allocations. [docker/buildx#2601] - -### Packaging updates - -- Compose support has been updated to v2.1.6. 
[docker/buildx#2547] - -[docker/buildx#2547]: https://github.com/docker/buildx/pull/2547/ -[docker/buildx#2601]: https://github.com/docker/buildx/pull/2601/ -[docker/buildx#2606]: https://github.com/docker/buildx/pull/2606/ -[docker/buildx#2607]: https://github.com/docker/buildx/pull/2607/ -[docker/buildx#2610]: https://github.com/docker/buildx/pull/2610/ -[docker/buildx#2615]: https://github.com/docker/buildx/pull/2615/ -[docker/buildx#2640]: https://github.com/docker/buildx/pull/2640/ -[docker/buildx#2641]: https://github.com/docker/buildx/pull/2641/ -[docker/buildx#2647]: https://github.com/docker/buildx/pull/2647/ -[docker/buildx#2649]: https://github.com/docker/buildx/pull/2649/ -[docker/buildx#2650]: https://github.com/docker/buildx/pull/2650/ -[docker/buildx#2651]: https://github.com/docker/buildx/pull/2651/ -[docker/buildx#2656]: https://github.com/docker/buildx/pull/2656/ -[docker/buildx#2660]: https://github.com/docker/buildx/pull/2660/ -[docker/buildx#2663]: https://github.com/docker/buildx/pull/2663/ -[docker/buildx#2666]: https://github.com/docker/buildx/pull/2666/ -[docker/buildx#2671]: https://github.com/docker/buildx/pull/2671/ - -## 0.16.2 - -{{< release-date date="2024-07-25" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.16.2). - -### Bug fixes - -- Fix possible "bad file descriptor" error when exporting local cache to NFS volume [docker/buildx#2629](https://github.com/docker/buildx/pull/2629/) - -## 0.16.1 - -{{< release-date date="2024-07-18" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.16.1). - -### Bug fixes - -- Fix possible panic due to data race in `buildx bake --print` command [docker/buildx#2603](https://github.com/docker/buildx/pull/2603/) -- Improve messaging about using `--debug` flag to inspect build warnings [docker/buildx#2612](https://github.com/docker/buildx/pull/2612/) - -## 0.16.0 - -{{< release-date date="2024-07-11" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.16.0). - -### New - -- Bake command now supports `--call` and `--check` flags and `call` attribute in target definitions for selecting custom frontend methods. [docker/buildx#2556](https://github.com/docker/buildx/pull/2556/), [docker/buildx#2576](https://github.com/docker/buildx/pull/2576/) -- {{< badge color=violet text=Experimental >}} Bake now supports `--list-targets` and `--list-variables` flags for inspecting the definition and possible configuration options for your project. [docker/buildx#2556](https://github.com/docker/buildx/pull/2556/) -- Bake definition variables and targets supports new `description` attribute for defining text-based description that can be inspected using e.g. `--list-targets` and `--list-variables`. [docker/buildx#2556](https://github.com/docker/buildx/pull/2556/) -- Bake now supports printing warnings for build check violations. [docker/buildx#2501](https://github.com/docker/buildx/pull/2501/) - -### Enhancements - -- The build command now ensures that multi-node builds use the same build reference for each node. [docker/buildx#2572](https://github.com/docker/buildx/pull/2572/) -- Avoid duplicate requests and improve the performance of remote driver. [docker/buildx#2501](https://github.com/docker/buildx/pull/2501/) -- Build warnings can now be saved to the metadata file by setting the `BUILDX_METADATA_WARNINGS=1` environment variable. 
[docker/buildx#2551](https://github.com/docker/buildx/pull/2551/), [docker/buildx#2521](https://github.com/docker/buildx/pull/2521/), [docker/buildx#2550](https://github.com/docker/buildx/pull/2550/) -- Improve message of the `--check` flag when no warnings are detected. [docker/buildx#2549](https://github.com/docker/buildx/pull/2549/) - -### Bug fixes - -- Fix support for multi-type annotations during build. [docker/buildx#2522](https://github.com/docker/buildx/pull/2522/) -- Fix a regression where possible inefficient transfer of files would occur when switching projects due to incremental transfer reuse. [docker/buildx#2558](https://github.com/docker/buildx/pull/2558/) -- Fix incorrect default load for chained Bake targets. [docker/buildx#2583](https://github.com/docker/buildx/pull/2583/) -- Fix incorrect `COMPOSE_PROJECT_NAME` handling in Bake. [docker/buildx#2579](https://github.com/docker/buildx/pull/2579/) -- Fix index annotations support for multi-node builds. [docker/buildx#2546](https://github.com/docker/buildx/pull/2546/) -- Fix capturing provenance metadata for builds from remote context. [docker/buildx#2560](https://github.com/docker/buildx/pull/2560/) - -### Packaging updates - -- Compose support has been updated to v2.1.3. [docker/buildx#2547](https://github.com/docker/buildx/pull/2547/) - -## 0.15.1 - -{{< release-date date="2024-06-18" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.15.1). - -### Bug fixes - -- Fix missing build error and exit code for some validation requests with `--check`. [docker/buildx#2518](https://github.com/docker/buildx/pull/2518/) -- Update fallback image for `--check` to Dockerfile v1.8.1. [docker/buildx#2538](https://github.com/docker/buildx/pull/2538/) - -## 0.15.0 - -{{< release-date date="2024-06-11" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.15.0). - -### New - -- New `--call` option allows setting evaluation method for a build, replacing the previous experimental `--print` flag. [docker/buildx#2498](https://github.com/docker/buildx/pull/2498/), [docker/buildx#2487](https://github.com/docker/buildx/pull/2487/), [docker/buildx#2513](https://github.com/docker/buildx/pull/2513/) - - In addition to the default `build` method, the following methods are implemented by Dockerfile frontend: - - - [`--call=check`](/reference/cli/docker/buildx/build.md#check): Run validation routines for your build configuration. For more information about build checks, see [Build checks](/manuals/build/checks.md) - - [`--call=outline`](/reference/cli/docker/buildx/build.md#call-outline): Show configuration that would be used by current build, including all build arguments, secrets, SSH mounts, etc., that your build would use. - - [`--call=targets`](/reference/cli/docker/buildx/build.md#call-targets): Show all available targets and their descriptions. - -- New `--prefer-index` flag has been added to the `docker buildx imagetools create` command to control the behavior of creating image out of one single-platform image manifest. [docker/buildx#2482](https://github.com/docker/buildx/pull/2482/) -- The [`kubernetes` driver](/manuals/build/builders/drivers/kubernetes.md) now supports a `timeout` option for configuring deployment timeout. [docker/buildx#2492](https://github.com/docker/buildx/pull/2492/) -- New metrics definitions have been added for build warning types. 
[docker/buildx#2482](https://github.com/docker/buildx/pull/2482/), [docker/buildx#2507](https://github.com/docker/buildx/pull/2507/) -- The [`buildx prune`](/reference/cli/docker/buildx/prune.md) and [`buildx du`](/reference/cli/docker/buildx/du.md) commands now support negative and prefix filters. [docker/buildx#2473](https://github.com/docker/buildx/pull/2473/) -- Building Compose files with Bake now supports passing SSH forwarding configuration. [docker/buildx#2445](https://github.com/docker/buildx/pull/2445/) -- Fix issue with configuring the `kubernetes` driver with custom TLS certificates. [docker/buildx#2454](https://github.com/docker/buildx/pull/2454/) -- Fix concurrent kubeconfig access when loading nodes. [docker/buildx#2497](https://github.com/docker/buildx/pull/2497/) - -### Packaging updates - -- Compose support has been updated to v2.1.2. [docker/buildx#2502](https://github.com/docker/buildx/pull/2502/), [docker/buildx#2425](https://github.com/docker/buildx/pull/2425/) - -## 0.14.0 - -{{< release-date date="2024-04-18" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.14.0). - -### Enhancements - -- Add support for `--print=lint` (experimental). - [docker/buildx#2404](https://github.com/docker/buildx/pull/2404), - [docker/buildx#2406](https://github.com/docker/buildx/pull/2406) -- Fix JSON formatting for custom implementations of print sub-requests in frontends. - [docker/buildx#2374](https://github.com/docker/buildx/pull/2374) -- Provenance records are now set when building with `--metadata-file`. - [docker/buildx#2280](https://github.com/docker/buildx/pull/2280) -- Add [Git authentication support](./bake/remote-definition.md#remote-definition-in-a-private-repository) for remote definitions. - [docker/buildx#2363](https://github.com/docker/buildx/pull/2363) -- New `default-load` driver option for the `docker-container`, `remote`, and `kubernetes` drivers to load build results to the Docker Engine image store by default. - [docker/buildx#2259](https://github.com/docker/buildx/pull/2259) -- Add `requests.ephemeral-storage`, `limits.ephemeral-storage` and `schedulername` options to the [`kubernetes` driver](/manuals/build/builders/drivers/kubernetes.md). - [docker/buildx#2370](https://github.com/docker/buildx/pull/2370), - [docker/buildx#2415](https://github.com/docker/buildx/pull/2415) -- Add `indexof` function for `docker-bake.hcl` files. - [docker/buildx#2384](https://github.com/docker/buildx/pull/2384) -- OpenTelemetry metrics for Buildx now measure durations of idle time, image exports, run operations, and image transfers for image source operations during build. - [docker/buildx#2316](https://github.com/docker/buildx/pull/2316), - [docker/buildx#2317](https://github.com/docker/buildx/pull/2317), - [docker/buildx#2323](https://github.com/docker/buildx/pull/2323), - [docker/buildx#2271](https://github.com/docker/buildx/pull/2271) -- Build progress metrics to the OpenTelemetry endpoint associated with the `desktop-linux` context no longer requires Buildx in experimental mode (`BUILDX_EXPERIMENTAL=1`). - [docker/buildx#2344](https://github.com/docker/buildx/pull/2344) - -### Bug fixes - -- Fix `--load` and `--push` incorrectly overriding outputs when used with multiple Bake file definitions. - [docker/buildx#2336](https://github.com/docker/buildx/pull/2336) -- Fix build from stdin with experimental mode enabled. 
- [docker/buildx#2394](https://github.com/docker/buildx/pull/2394) -- Fix an issue where delegated traces could be duplicated. - [docker/buildx#2362](https://github.com/docker/buildx/pull/2362) - -### Packaging updates - -- Compose support has been updated to [v2.26.1](https://github.com/docker/compose/releases/tag/v2.26.1) - (via [`compose-go` v2.0.2](https://github.com/compose-spec/compose-go/releases/tag/v2.0.2)). - [docker/buildx#2391](https://github.com/docker/buildx/pull/2391) - -## 0.13.1 - -{{< release-date date="2024-03-13" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.13.1). - -### Bug fixes - -- Fix connecting to `docker-container://` and `kube-pod://` style URLs with remote driver. [docker/buildx#2327](https://github.com/docker/buildx/pull/2327) -- Fix handling of `--push` with Bake when a target has already defined a non-image output. [docker/buildx#2330](https://github.com/docker/buildx/pull/2330) - -## 0.13.0 - -{{< release-date date="2024-03-06" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.13.0). - -### New - -- New `docker buildx dial-stdio` command for directly contacting BuildKit daemon of the configured builder instance. [docker/buildx#2112](https://github.com/docker/buildx/pull/2112) -- Windows container builders can now be created using the `remote` driver and npipe connections. [docker/buildx#2287](https://github.com/docker/buildx/pull/2287) -- Npipe URL scheme is now supported on Windows. [docker/buildx#2250](https://github.com/docker/buildx/pull/2250) -- {{< badge color=violet text=Experimental >}} Buildx can now export OpenTelemetry metrics for build duration and transfer sizes. [docker/buildx#2235](https://github.com/docker/buildx/pull/2235), [docker/buildx#2258](https://github.com/docker/buildx/pull/2258) [docker/buildx#2225](https://github.com/docker/buildx/pull/2225) [docker/buildx#2224](https://github.com/docker/buildx/pull/2224) [docker/buildx#2155](https://github.com/docker/buildx/pull/2155) - -### Enhancements - -- Bake command now supports defining `shm-size` and `ulimit` values. [docker/buildx#2279](https://github.com/docker/buildx/pull/2279), [docker/buildx#2242](https://github.com/docker/buildx/pull/2242) -- Better handling of connecting to unhealthy nodes with remote driver. [docker/buildx#2130](https://github.com/docker/buildx/pull/2130) -- Builders using the `docker-container` and `kubernetes` drivers now allow `network.host` entitlement by default (allowing access to the container's network). [docker/buildx#2266](https://github.com/docker/buildx/pull/2266) -- Builds can now use multiple outputs with a single command (requires BuildKit v0.13+). [docker/buildx#2290](https://github.com/docker/buildx/pull/2290), [docker/buildx#2302](https://github.com/docker/buildx/pull/2302) -- Default Git repository path is now found via configured tracking branch. [docker/buildx#2146](https://github.com/docker/buildx/pull/2146) -- Fix possible cache invalidation when using linked targets in Bake. [docker/buildx#2265](https://github.com/docker/buildx/pull/2265) -- Fixes for Git repository path sanitization in WSL. [docker/buildx#2167](https://github.com/docker/buildx/pull/2167) -- Multiple builders can now be removed with a single command. [docker/buildx#2140](https://github.com/docker/buildx/pull/2140) -- New cancellation signal handling via Unix socket. 
[docker/buildx#2184](https://github.com/docker/buildx/pull/2184) [docker/buildx#2289](https://github.com/docker/buildx/pull/2289) -- The Compose spec support has been updated to v2.0.0-rc.8. [docker/buildx#2205](https://github.com/docker/buildx/pull/2205) -- The `--config` flag for `docker buildx create` was renamed to `--buildkitd-config`. [docker/buildx#2268](https://github.com/docker/buildx/pull/2268) -- The `--metadata-file` flag for `docker buildx build` can now also return build reference that can be used for further build debugging, for example, in Docker Desktop. [docker/buildx#2263](https://github.com/docker/buildx/pull/2263) -- The `docker buildx bake` command now shares the same authentication provider for all targets for improved performance. [docker/buildx#2147](https://github.com/docker/buildx/pull/2147) -- The `docker buildx imagetools inspect` command now shows DSSE-signed SBOM and Provenance attestations. [docker/buildx#2194](https://github.com/docker/buildx/pull/2194) -- The `docker buildx ls` command now supports `--format` options for controlling the output. [docker/buildx#1787](https://github.com/docker/buildx/pull/1787) -- The `docker-container` driver now supports driver options for defining restart policy for BuildKit container. [docker/buildx#1271](https://github.com/docker/buildx/pull/1271) -- VCS attributes exported from Buildx now include the local directory sub-paths if they're relative to the current Git repository. [docker/buildx#2156](https://github.com/docker/buildx/pull/2156) -- `--add-host` flag now permits a `=` separator for IPv6 addresses. [docker/buildx#2121](https://github.com/docker/buildx/pull/2121) - -### Bug fixes - -- Fix additional output when exporting progress with `--progress=rawjson` [docker/buildx#2252](https://github.com/docker/buildx/pull/2252) -- Fix possible console warnings on Windows. [docker/buildx#2238](https://github.com/docker/buildx/pull/2238) -- Fix possible inconsistent configuration merge order when using Bake with many configurations. [docker/buildx#2237](https://github.com/docker/buildx/pull/2237) -- Fix possible panic in the `docker buildx imagetools create` command. [docker/buildx#2230](https://github.com/docker/buildx/pull/2230) - -## 0.12.1 - -{{< release-date date="2024-01-12" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.12.1). - -### Bug fixes and enhancements - -- Fix incorrect validation of some `--driver-opt` values that could cause a panic and corrupt state to be stored. - [docker/buildx#2176](https://github.com/docker/buildx/pull/2176) - -## 0.12.0 - -{{< release-date date="2023-11-16" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.12.0). - -### New - -- New `--annotation` flag for the `buildx build`, and an `annotations` key in the Bake file, that lets you add OCI Annotations to build results. - [#2020](https://github.com/docker/buildx/pull/2020), - [#2098](https://github.com/docker/buildx/pull/2098) -- New experimental debugging features, including a new `debug` command and an interactive debugging console. - This feature currently requires setting `BUILDX_EXPERIMENTAL=1`. 
- [#2006](https://github.com/docker/buildx/pull/2006), - [#1896](https://github.com/docker/buildx/pull/1896), - [#1970](https://github.com/docker/buildx/pull/1970), - [#1914](https://github.com/docker/buildx/pull/1914), - [#2026](https://github.com/docker/buildx/pull/2026), - [#2086](https://github.com/docker/buildx/pull/2086) - -### Bug fixes and enhancements - -- The special `host-gateway` IP mapping can now be used with the `--add-host` flag during build. - [#1894](https://github.com/docker/buildx/pull/1894), - [#2083](https://github.com/docker/buildx/pull/2083) -- Bake now allows adding local source files when building from remote definition. - [#1838](https://github.com/docker/buildx/pull/1838) -- The status of uploading build results to Docker is now shown interactively on progress bar. - [#1994](https://github.com/docker/buildx/pull/1994) -- Error handling has been improved when bootstrapping multi-node build clusters. - [#1869](https://github.com/docker/buildx/pull/1869) -- The `buildx imagetools create` command now allows adding annotation when creating new images in the registry. - [#1965](https://github.com/docker/buildx/pull/1965) -- OpenTelemetry build trace delegation from buildx is now possible with Docker and Remote driver. - [#2034](https://github.com/docker/buildx/pull/2034) -- Bake command now shows all files where the build definition was loaded from on the progress bar. - [#2076](https://github.com/docker/buildx/pull/2076) -- Bake files now allow the same attributes to be defined in multiple definition files. - [#1062](https://github.com/docker/buildx/pull/1062) -- Using the Bake command with a remote definition now allows this definition to use local Dockerfiles. - [#2015](https://github.com/docker/buildx/pull/2015) -- Docker container driver now explicitly sets BuildKit config path to make sure configurations are loaded from same location for both mainline and rootless images. - [#2093](https://github.com/docker/buildx/pull/2093) -- Improve performance of detecting when BuildKit instance has completed booting. - [#1934](https://github.com/docker/buildx/pull/1934) -- Container driver now accepts many new driver options for defining the resource limits for BuildKit container. - [#2048](https://github.com/docker/buildx/pull/2048) -- Inspection commands formatting has been improved. - [#2068](https://github.com/docker/buildx/pull/2068) -- Error messages about driver capabilities have been improved. - [#1998](https://github.com/docker/buildx/pull/1998) -- Improve errors when invoking Bake command without targets. - [#2100](https://github.com/docker/buildx/pull/2100) -- Allow enabling debug logs with environment variables when running in standalone mode. - [#1821](https://github.com/docker/buildx/pull/1821) -- When using Docker driver the default image resolve mode has been updated to prefer local Docker images for backward compatibility. - [#1886](https://github.com/docker/buildx/pull/1886) -- Kubernetes driver now allows setting custom annotations and labels to the BuildKit deployments and pods. - [#1938](https://github.com/docker/buildx/pull/1938) -- Kubernetes driver now allows setting authentication token with endpoint configuration. - [#1891](https://github.com/docker/buildx/pull/1891) -- Fix possible issue with chained targets in Bake that could result in build failing or local source for a target uploaded multiple times. 
- [#2113](https://github.com/docker/buildx/pull/2113) -- Fix issue when accessing global target properties when using the matrix feature of the Bake command. - [#2106](https://github.com/docker/buildx/pull/2106) -- Fixes for formatting validation of certain build flags - [#2040](https://github.com/docker/buildx/pull/2040) -- Fixes to avoid locking certain commands unnecessarily while booting builder nodes. - [#2066](https://github.com/docker/buildx/pull/2066) -- Fix cases where multiple builds try to bootstrap the same builder instance in parallel. - [#2000](https://github.com/docker/buildx/pull/2000) -- Fix cases where errors on uploading build results to Docker could be dropped in some cases. - [#1927](https://github.com/docker/buildx/pull/1927) -- Fix detecting capabilities for missing attestation support based on build output. - [#1988](https://github.com/docker/buildx/pull/1988) -- Fix the build for loading in Bake remote definition to not show up in build history records. - [#1961](https://github.com/docker/buildx/pull/1961), - [#1954](https://github.com/docker/buildx/pull/1954) -- Fix errors when building Compose files using the that define profiles with Bake. - [#1903](https://github.com/docker/buildx/pull/1903) -- Fix possible time correction errors on progress bar. - [#1968](https://github.com/docker/buildx/pull/1968) -- Fix passing custom cgroup parent to builds that used the new controller interface. - [#1913](https://github.com/docker/buildx/pull/1913) - -### Packaging - -- Compose support has been updated to 1.20, enabling "include" functionality when using the Bake command. - [#1971](https://github.com/docker/buildx/pull/1971), - [#2065](https://github.com/docker/buildx/pull/2065), - [#2094](https://github.com/docker/buildx/pull/2094) - -## 0.11.2 - -{{< release-date date="2023-07-18" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.11.2). - -### Bug fixes and enhancements - -- Fix a regression that caused buildx to not read the `KUBECONFIG` path from the instance store. - [docker/buildx#1941](https://github.com/docker/buildx/pull/1941) -- Fix a regression with result handle builds showing up in the build history incorrectly. - [docker/buildx#1954](https://github.com/docker/buildx/pull/1954) - -## 0.11.1 - -{{< release-date date="2023-07-05" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.11.1). - -### Bug fixes and enhancements - -- Fix a regression for bake where services in profiles would not be loaded. - [docker/buildx#1903](https://github.com/docker/buildx/pull/1903) -- Fix a regression where `--cgroup-parent` option had no effect during build. - [docker/buildx#1913](https://github.com/docker/buildx/pull/1913) -- Fix a regression where valid docker contexts could fail buildx builder name - validation. [docker/buildx#1879](https://github.com/docker/buildx/pull/1879) -- Fix a possible panic when terminal is resized during the build. - [docker/buildx#1929](https://github.com/docker/buildx/pull/1929) - -## 0.11.0 - -{{< release-date date="2023-06-13" >}} - -The full release note for this release is available -[on GitHub](https://github.com/docker/buildx/releases/tag/v0.11.0). - -### New - -- Bake now supports [matrix builds](/manuals/build/bake/reference.md#targetmatrix). - The new matrix field on `target` lets you create multiple similar targets to - remove duplication in bake files. 
[docker/buildx#1690](https://github.com/docker/buildx/pull/1690) -- New experimental `--detach` flag for running builds in detached mode. - [docker/buildx#1296](https://github.com/docker/buildx/pull/1296), - [docker/buildx#1620](https://github.com/docker/buildx/pull/1620), - [docker/buildx#1614](https://github.com/docker/buildx/pull/1614), - [docker/buildx#1737](https://github.com/docker/buildx/pull/1737), - [docker/buildx#1755](https://github.com/docker/buildx/pull/1755) -- New experimental [debug monitor mode](https://github.com/docker/buildx/blob/v0.11.0-rc1/docs/guides/debugging.md) - that lets you start a debug session in your builds. - [docker/buildx#1626](https://github.com/docker/buildx/pull/1626), - [docker/buildx#1640](https://github.com/docker/buildx/pull/1640) -- New [`EXPERIMENTAL_BUILDKIT_SOURCE_POLICY` environment variable](./building/variables.md#experimental_buildkit_source_policy) - for applying a BuildKit source policy file. - [docker/buildx#1628](https://github.com/docker/buildx/pull/1628) - -### Bug fixes and enhancements - -- `--load` now supports loading multi-platform images when the containerd image - store is enabled. - [docker/buildx#1813](https://github.com/docker/buildx/pull/1813) -- Build progress output now displays the name of the builder being used. - [docker/buildx#1177](https://github.com/docker/buildx/pull/1177) -- Bake now supports detecting `compose.{yml,yaml}` files. - [docker/buildx#1752](https://github.com/docker/buildx/pull/1752) -- Bake now supports new compose build keys `dockerfile_inline` and `additional_contexts`. - [docker/buildx#1784](https://github.com/docker/buildx/pull/1784) -- Bake now supports replace HCL function. - [docker/buildx#1720](https://github.com/docker/buildx/pull/1720) -- Bake now allows merging multiple similar attestation parameters into a single - parameter to allow overriding with a single global value. - [docker/buildx#1699](https://github.com/docker/buildx/pull/1699) -- Initial support for shell completion. - [docker/buildx#1727](https://github.com/docker/buildx/pull/1727) -- BuildKit versions now correctly display in `buildx ls` and `buildx inspect` - for builders using the `docker` driver. - [docker/buildx#1552](https://github.com/docker/buildx/pull/1552) -- Display additional builder node details in buildx inspect view. - [docker/buildx#1440](https://github.com/docker/buildx/pull/1440), - [docker/buildx#1854](https://github.com/docker/buildx/pull/1874) -- Builders using the `remote` driver allow using TLS without proving its own - key/cert (if BuildKit remote is configured to support it) - [docker/buildx#1693](https://github.com/docker/buildx/pull/1693) -- Builders using the `kubernetes` driver support a new `serviceaccount` option, - which sets the `serviceAccountName` of the Kubernetes pod. - [docker/buildx#1597](https://github.com/docker/buildx/pull/1597) -- Builders using the `kubernetes` driver support the `proxy-url` option in the - kubeconfig file. - [docker/buildx#1780](https://github.com/docker/buildx/pull/1780) -- Builders using the `kubernetes` are now automatically assigned a node name if - no name is explicitly provided. - [docker/buildx#1673](https://github.com/docker/buildx/pull/1673) -- Fix invalid path when writing certificates for `docker-container` driver on Windows. - [docker/buildx#1831](https://github.com/docker/buildx/pull/1831) -- Fix bake failure when remote bake file is accessed using SSH. 
- [docker/buildx#1711](https://github.com/docker/buildx/pull/1711), - [docker/buildx#1734](https://github.com/docker/buildx/pull/1734) -- Fix bake failure when remote bake context is incorrectly resolved. - [docker/buildx#1783](https://github.com/docker/buildx/pull/1783) -- Fix path resolution of `BAKE_CMD_CONTEXT` and `cwd://` paths in bake contexts. - [docker/buildx#1840](https://github.com/docker/buildx/pull/1840) -- Fix mixed OCI and Docker media types when creating images using - `buildx imagetools create`. - [docker/buildx#1797](https://github.com/docker/buildx/pull/1797) -- Fix mismatched image id between `--iidfile` and `-q`. - [docker/buildx#1844](https://github.com/docker/buildx/pull/1844) -- Fix AWS authentication when mixing static creds and IAM profiles. - [docker/buildx#1816](https://github.com/docker/buildx/pull/1816) - -## 0.10.4 - -{{< release-date date="2023-03-06" >}} - -{{% include "buildx-v0.10-disclaimer.md" %}} - -### Bug fixes and enhancements - -- Add `BUILDX_NO_DEFAULT_ATTESTATIONS` as alternative to `--provenance false`. [docker/buildx#1645](https://github.com/docker/buildx/issues/1645) -- Disable dirty Git checkout detection by default for performance. Can be enabled with `BUILDX_GIT_CHECK_DIRTY` opt-in. [docker/buildx#1650](https://github.com/docker/buildx/issues/1650) -- Strip credentials from VCS hint URL before sending to BuildKit. [docker/buildx#1664](https://github.com/docker/buildx/issues/1664) - -## 0.10.3 - -{{< release-date date="2023-02-16" >}} - -{{% include "buildx-v0.10-disclaimer.md" %}} - -### Bug fixes and enhancements - -- Fix reachable commit and warnings on collecting Git provenance info. [docker/buildx#1592](https://github.com/docker/buildx/issues/1592), [docker/buildx#1634](https://github.com/docker/buildx/issues/1634) -- Fix a regression where docker context was not being validated. [docker/buildx#1596](https://github.com/docker/buildx/issues/1596) -- Fix function resolution with JSON bake definition. [docker/buildx#1605](https://github.com/docker/buildx/issues/1605) -- Fix case where original HCL bake diagnostic is discarded. [docker/buildx#1607](https://github.com/docker/buildx/issues/1607) -- Fix labels not correctly set with bake and compose file. [docker/buildx#1631](https://github.com/docker/buildx/issues/1631) - -## 0.10.2 - -{{< release-date date="2023-01-30" >}} - -{{% include "buildx-v0.10-disclaimer.md" %}} - -### Bug fixes and enhancements - -- Fix preferred platforms order not taken into account in multi-node builds. [docker/buildx#1561](https://github.com/docker/buildx/issues/1561) -- Fix possible panic on handling `SOURCE_DATE_EPOCH` environment variable. [docker/buildx#1564](https://github.com/docker/buildx/issues/1564) -- Fix possible push error on multi-node manifest merge since BuildKit v0.11 on - some registries. [docker/buildx#1566](https://github.com/docker/buildx/issues/1566) -- Improve warnings on collecting Git provenance info. [docker/buildx#1568](https://github.com/docker/buildx/issues/1568) - -## 0.10.1 - -{{< release-date date="2023-01-27" >}} - -{{% include "buildx-v0.10-disclaimer.md" %}} - -### Bug fixes and enhancements - -- Fix sending the correct origin URL as `vsc:source` metadata. [docker/buildx#1548](https://github.com/docker/buildx/issues/1548) -- Fix possible panic from data-race. [docker/buildx#1504](https://github.com/docker/buildx/issues/1504) -- Fix regression with `rm --all-inactive`. 
[docker/buildx#1547](https://github.com/docker/buildx/issues/1547) -- Improve attestation access in `imagetools inspect` by lazily loading data. [docker/buildx#1546](https://github.com/docker/buildx/issues/1546) -- Correctly mark capabilities request as internal. [docker/buildx#1538](https://github.com/docker/buildx/issues/1538) -- Detect invalid attestation configuration. [docker/buildx#1545](https://github.com/docker/buildx/issues/1545) -- Update containerd patches to fix possible push regression affecting - `imagetools` commands. [docker/buildx#1559](https://github.com/docker/buildx/issues/1559) - -## 0.10.0 - -{{< release-date date="2023-01-10" >}} - -{{% include "buildx-v0.10-disclaimer.md" %}} - -### New - -- The `buildx build` command supports new `--attest` flag, along with - shorthands `--sbom` and `--provenance`, for adding attestations for your - current build. [docker/buildx#1412](https://github.com/docker/buildx/issues/1412) - [docker/buildx#1475](https://github.com/docker/buildx/issues/1475) - - `--attest type=sbom` or `--sbom=true` adds [SBOM attestations](/manuals/build/metadata/attestations/sbom.md). - - `--attest type=provenance` or `--provenance=true` adds [SLSA provenance attestation](/manuals/build/metadata/attestations/slsa-provenance.md). - - When creating OCI images, a minimal provenance attestation is included - with the image by default. -- When building with BuildKit that supports provenance attestations Buildx will - automatically share the version control information of your build context, so - it can be shown in provenance for later debugging. Previously this only - happened when building from a Git URL directly. To opt-out of this behavior - you can set `BUILDX_GIT_INFO=0`. Optionally you can also automatically define - labels with VCS info by setting `BUILDX_GIT_LABELS=1`. - [docker/buildx#1462](https://github.com/docker/buildx/issues/1462), - [docker/buildx#1297](https://github.com/docker/buildx), - [docker/buildx#1341](https://github.com/docker/buildx/issues/1341), - [docker/buildx#1468](https://github.com/docker/buildx), - [docker/buildx#1477](https://github.com/docker/buildx/issues/1477) -- Named contexts with `--build-context` now support `oci-layout://` protocol - for initializing the context with a value of a local OCI layout directory. - E.g. `--build-context stagename=oci-layout://path/to/dir`. This feature - requires BuildKit v0.11.0+ and Dockerfile 1.5.0+. [docker/buildx#1456](https://github.com/docker/buildx/issues/1456) -- Bake now supports [resource interpolation](bake/inheritance.md#reusing-single-attribute-from-targets) - where you can reuse the values from other target definitions. [docker/buildx#1434](https://github.com/docker/buildx/issues/1434) -- Buildx will now automatically forward `SOURCE_DATE_EPOCH` environment variable - if it is defined in your environment. This feature is meant to be used with - updated [reproducible builds](https://github.com/moby/buildkit/blob/master/docs/build-repro.md) - support in BuildKit v0.11.0+. [docker/buildx#1482](https://github.com/docker/buildx/issues/1482) -- Buildx now remembers the last activity for a builder for better organization - of builder instances. [docker/buildx#1439](https://github.com/docker/buildx/issues/1439) -- Bake definition now supports null values for [variables](bake/reference.md#variable) and [labels](bake/reference.md#targetlabels) - for build arguments and labels to use the defaults set in the Dockerfile. 
- [docker/buildx#1449](https://github.com/docker/buildx/issues/1449) -- The [`buildx imagetools inspect` command](/reference/cli/docker/buildx/imagetools/inspect.md) - now supports showing SBOM and Provenance data. - [docker/buildx#1444](https://github.com/docker/buildx/issues/1444), - [docker/buildx#1498](https://github.com/docker/buildx/issues/1498) -- Increase performance of `ls` command and inspect flows. - [docker/buildx#1430](https://github.com/docker/buildx/issues/1430), - [docker/buildx#1454](https://github.com/docker/buildx/issues/1454), - [docker/buildx#1455](https://github.com/docker/buildx/issues/1455), - [docker/buildx#1345](https://github.com/docker/buildx/issues/1345) -- Adding extra hosts with [Docker driver](/manuals/build/builders/drivers/docker.md) now supports - Docker-specific `host-gateway` special value. [docker/buildx#1446](https://github.com/docker/buildx/issues/1446) -- [OCI exporter](exporters/oci-docker.md) now supports `tar=false` option for - exporting OCI format directly in a directory. [docker/buildx#1420](https://github.com/docker/buildx/issues/1420) - -### Upgrades - -- Updated the Compose Specification to 1.6.0. [docker/buildx#1387](https://github.com/docker/buildx/issues/1387) - -### Bug fixes and enhancements - -- `--invoke` can now load default launch environment from the image metadata. [docker/buildx#1324](https://github.com/docker/buildx/issues/1324) -- Fix container driver behavior in regards to UserNS. [docker/buildx#1368](https://github.com/docker/buildx/issues/1368) -- Fix possible panic in Bake when using wrong variable value type. [docker/buildx#1442](https://github.com/docker/buildx/issues/1442) -- Fix possible panic in `imagetools inspect`. [docker/buildx#1441](https://github.com/docker/buildx/issues/1441) - [docker/buildx#1406](https://github.com/docker/buildx/issues/1406) -- Fix sending empty `--add-host` value to BuildKit by default. [docker/buildx#1457](https://github.com/docker/buildx/issues/1457) -- Fix handling progress prefixes with progress groups. [docker/buildx#1305](https://github.com/docker/buildx/issues/1305) -- Fix recursively resolving groups in Bake. [docker/buildx#1313](https://github.com/docker/buildx/issues/1313) -- Fix possible wrong indentation on multi-node builder manifests. [docker/buildx#1396](https://github.com/docker/buildx/issues/1396) -- Fix possible panic from missing OpenTelemetry configuration. [docker/buildx#1383](https://github.com/docker/buildx/issues/1383) -- Fix `--progress=tty` behavior when TTY is not available. [docker/buildx#1371](https://github.com/docker/buildx/issues/1371) -- Fix connection error conditions in `prune` and `du` commands. [docker/buildx#1307](https://github.com/docker/buildx/issues/1307) - -## 0.9.1 - -{{< release-date date="2022-08-18" >}} - -### Bug fixes and enhancements - -- The `inspect` command now displays the BuildKit version in use. [docker/buildx#1279](https://github.com/docker/buildx/issues/1279) -- Fixed a regression when building Compose files that contain services without a - build block. [docker/buildx#1277](https://github.com/docker/buildx/issues/1277) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.9.1). - -## 0.9.0 - -{{< release-date date="2022-08-17" >}} - -### New - -- Support for a new [`remote` driver](/manuals/build/builders/drivers/remote.md) that you can use - to connect to any already running BuildKit instance. 
- [docker/buildx#1078](https://github.com/docker/buildx/issues/1078), - [docker/buildx#1093](https://github.com/docker/buildx/issues/1093), - [docker/buildx#1094](https://github.com/docker/buildx/issues/1094), - [docker/buildx#1103](https://github.com/docker/buildx/issues/1103), - [docker/buildx#1134](https://github.com/docker/buildx/issues/1134), - [docker/buildx#1204](https://github.com/docker/buildx/issues/1204) -- You can now load Dockerfile from standard input even when the build context is - coming from external Git or HTTP URL. [docker/buildx#994](https://github.com/docker/buildx/issues/994) -- Build commands now support new the build context type `oci-layout://` for loading - [build context from local OCI layout directories](/reference/cli/docker/buildx/build.md#source-oci-layout). - Note that this feature depends on an unreleased BuildKit feature and builder - instance from `moby/buildkit:master` needs to be used until BuildKit v0.11 is - released. [docker/buildx#1173](https://github.com/docker/buildx/issues/1173) -- You can now use the new `--print` flag to run helper functions supported by the - BuildKit frontend performing the build and print their results. You can use - this feature in Dockerfile to show the build arguments and secrets that the - current build supports with `--print=outline` and list all available - Dockerfile stages with `--print=targets`. This feature is experimental for - gathering early feedback and requires enabling `BUILDX_EXPERIMENTAL=1` - environment variable. We plan to update/extend this feature in the future - without keeping backward compatibility. [docker/buildx#1100](https://github.com/docker/buildx/issues/1100), - [docker/buildx#1272](https://github.com/docker/buildx/issues/1272) -- You can now use the new `--invoke` flag to launch interactive containers from - build results for an interactive debugging cycle. You can reload these - containers with code changes or restore them to an initial state from the - special monitor mode. This feature is experimental for gathering early - feedback and requires enabling `BUILDX_EXPERIMENTAL=1` environment variable. - We plan to update/extend this feature in the future without enabling backward - compatibility. - [docker/buildx#1168](https://github.com/docker/buildx/issues/1168), - [docker/buildx#1257](https://github.com/docker/buildx), - [docker/buildx#1259](https://github.com/docker/buildx/issues/1259) -- Buildx now understands environment variable `BUILDKIT_COLORS` and `NO_COLOR` - to customize/disable the colors of interactive build progressbar. [docker/buildx#1230](https://github.com/docker/buildx/issues/1230), - [docker/buildx#1226](https://github.com/docker/buildx/issues/1226) -- `buildx ls` command now shows the current BuildKit version of each builder - instance. [docker/buildx#998](https://github.com/docker/buildx/issues/998) -- The `bake` command now loads `.env` file automatically when building Compose - files for compatibility. [docker/buildx#1261](https://github.com/docker/buildx/issues/1261) -- Bake now supports Compose files with `cache_to` definition. [docker/buildx#1155](https://github.com/docker/buildx/issues/1155) -- Bake now supports new builtin function `timestamp()` to access current time. [docker/buildx#1214](https://github.com/docker/buildx/issues/1214) -- Bake now supports Compose build secrets definition. [docker/buildx#1069](https://github.com/docker/buildx/issues/1069) -- Additional build context configuration is now supported in Compose files via `x-bake`. 
[docker/buildx#1256](https://github.com/docker/buildx/issues/1256) -- Inspecting builder now shows current driver options configuration. [docker/buildx#1003](https://github.com/docker/buildx/issues/1003), - [docker/buildx#1066](https://github.com/docker/buildx/issues/1066) - -### Updates - -- Updated the Compose Specification to 1.4.0. [docker/buildx#1246](https://github.com/docker/buildx/issues/1246), - [docker/buildx#1251](https://github.com/docker/buildx/issues/1251) - -### Bug fixes and enhancements - -- The `buildx ls` command output has been updated with better access to errors - from different builders. [docker/buildx#1109](https://github.com/docker/buildx/issues/1109) -- The `buildx create` command now performs additional validation of builder parameters - to avoid creating a builder instance with invalid configuration. [docker/buildx#1206](https://github.com/docker/buildx/issues/1206) -- The `buildx imagetools create` command can now create new multi-platform images - even if the source subimages are located on different repositories or - registries. [docker/buildx#1137](https://github.com/docker/buildx/issues/1137) -- You can now set the default builder config that is used when creating - builder instances without passing custom `--config` value. [docker/buildx#1111](https://github.com/docker/buildx/issues/1111) -- Docker driver can now detect if `dockerd` instance supports initially - disabled Buildkit features like multi-platform images. [docker/buildx#1260](https://github.com/docker/buildx/issues/1260), - [docker/buildx#1262](https://github.com/docker/buildx/issues/1262) -- Compose files using targets with `.` in the name are now converted to use `_` - so the selector keys can still be used in such targets. [docker/buildx#1011](https://github.com/docker/buildx/issues/1011) -- Included an additional validation for checking valid driver configurations. [docker/buildx#1188](https://github.com/docker/buildx/issues/1188), - [docker/buildx#1273](https://github.com/docker/buildx/issues/1273) -- The `remove` command now displays the removed builder and forbids removing - context builders. [docker/buildx#1128](https://github.com/docker/buildx/issues/1128) -- Enable Azure authentication when using Kubernetes driver. [docker/buildx#974](https://github.com/docker/buildx/issues/974) -- Add tolerations handling for kubernetes driver. [docker/buildx#1045](https://github.com/docker/buildx/issues/1045) - [docker/buildx#1053](https://github.com/docker/buildx/issues/1053) -- Replace deprecated seccomp annotations with `securityContext` in the `kubernetes` driver. - [docker/buildx#1052](https://github.com/docker/buildx/issues/1052) -- Fix panic on handling manifests with nil platform. [docker/buildx#1144](https://github.com/docker/buildx/issues/1144) -- Fix using duration filter with `prune` command. [docker/buildx#1252](https://github.com/docker/buildx/issues/1252) -- Fix merging multiple JSON files on Bake definition. [docker/buildx#1025](https://github.com/docker/buildx/issues/1025) -- Fix issues with implicit builder created from Docker context had invalid - configuration or dropped connection. [docker/buildx#1129](https://github.com/docker/buildx/issues/1129) -- Fix conditions for showing no-output warning when using named contexts. [docker/buildx#968](https://github.com/docker/buildx/issues/968) -- Fix duplicating builders when builder instance and docker context have the - same name. 
[docker/buildx#1131](https://github.com/docker/buildx/issues/1131) -- Fix printing unnecessary SSH warning logs. [docker/buildx#1085](https://github.com/docker/buildx/issues/1085) -- Fix possible panic when using an empty variable block with Bake JSON - definition. [docker/buildx#1080](https://github.com/docker/buildx/issues/1080) -- Fix image tools commands not handling `--builder` flag correctly. [docker/buildx#1067](https://github.com/docker/buildx/issues/1067) -- Fix using custom image together with rootless option. [docker/buildx#1063](https://github.com/docker/buildx/issues/1063) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.9.0). - -## 0.8.2 - -{{< release-date date="2022-04-04" >}} - -### Updates - -- Update Compose spec used by `buildx bake` to v1.2.1 to fix parsing ports definition. [docker/buildx#1033](https://github.com/docker/buildx/issues/1033) - -### Bug fixes and enhancements - -- Fix possible crash on handling progress streams from BuildKit v0.10. [docker/buildx#1042](https://github.com/docker/buildx/issues/1042) -- Fix parsing groups in `buildx bake` when already loaded by a parent group. [docker/buildx#1021](https://github.com/docker/buildx/issues/1021) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.8.2). - -## 0.8.1 - -{{< release-date date="2022-03-21" >}} - -### Bug fixes and enhancements - -- Fix possible panic on handling build context scanning errors. [docker/buildx#1005](https://github.com/docker/buildx/issues/1005) -- Allow `.` on Compose target names in `buildx bake` for backward compatibility. [docker/buildx#1018](https://github.com/docker/buildx/issues/1018) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.8.1). - -## 0.8.0 - -{{< release-date date="2022-03-09" >}} - -### New - -- Build command now accepts `--build-context` flag to [define additional named build contexts](/reference/cli/docker/buildx/build/#build-context) - for your builds. [docker/buildx#904](https://github.com/docker/buildx/issues/904) -- Bake definitions now support [defining dependencies between targets](bake/contexts.md) - and using the result of one target in another build. - [docker/buildx#928](https://github.com/docker/buildx/issues/928), - [docker/buildx#965](https://github.com/docker/buildx/issues/965), - [docker/buildx#963](https://github.com/docker/buildx/issues/963), - [docker/buildx#962](https://github.com/docker/buildx/issues/962), - [docker/buildx#981](https://github.com/docker/buildx/issues/981) -- `imagetools inspect` now accepts `--format` flag allowing access to config - and buildinfo for specific images. [docker/buildx#854](https://github.com/docker/buildx/issues/854), - [docker/buildx#972](https://github.com/docker/buildx/issues/972) -- New flag `--no-cache-filter` allows configuring build, so it ignores cache - only for specified Dockerfile stages. [docker/buildx#860](https://github.com/docker/buildx/issues/860) -- Builds can now show a summary of warnings sets by the building frontend. [docker/buildx#892](https://github.com/docker/buildx/issues/892) -- The new build argument `BUILDKIT_INLINE_BUILDINFO_ATTRS` allows opting-in to embed - building attributes to resulting image. 
[docker/buildx#908](https://github.com/docker/buildx/issues/908) -- The new flag `--keep-buildkitd` allows keeping BuildKit daemon running when removing a builder - - [docker/buildx#852](https://github.com/docker/buildx/issues/852) - -### Bug fixes and enhancements - -- `--metadata-file` output now supports embedded structure types. [docker/buildx#946](https://github.com/docker/buildx/issues/946) -- `buildx rm` now accepts new flag `--all-inactive` for removing all builders - that are not currently running. [docker/buildx#885](https://github.com/docker/buildx/issues/885) -- Proxy config is now read from Docker configuration file and sent with build - requests for backward compatibility. [docker/buildx#959](https://github.com/docker/buildx/issues/959) -- Support host networking in Compose. [docker/buildx#905](https://github.com/docker/buildx/issues/905), - [docker/buildx#880](https://github.com/docker/buildx/issues/880) -- Bake files can now be read from stdin with `-f -`. [docker/buildx#864](https://github.com/docker/buildx/issues/864) -- `--iidfile` now always writes the image config digest independently of the - driver being used (use `--metadata-file` for digest). [docker/buildx#980](https://github.com/docker/buildx/issues/980) -- Target names in Bake are now restricted to not use special characters. [docker/buildx#929](https://github.com/docker/buildx/issues/929) -- Image manifest digest can be read from metadata when pushed with `docker` - driver. [docker/buildx#989](https://github.com/docker/buildx/issues/989) -- Fix environment file handling in Compose files. [docker/buildx#905](https://github.com/docker/buildx/issues/905) -- Show last access time in `du` command. [docker/buildx#867](https://github.com/docker/buildx/issues/867) -- Fix possible double output logs when multiple Bake targets run same build - steps. [docker/buildx#977](https://github.com/docker/buildx/issues/977) -- Fix possible errors on multi-node builder building multiple targets with - mixed platform. [docker/buildx#985](https://github.com/docker/buildx/issues/985) -- Fix some nested inheritance cases in Bake. [docker/buildx#914](https://github.com/docker/buildx/issues/914) -- Fix printing default group on Bake files. [docker/buildx#884](https://github.com/docker/buildx/issues/884) -- Fix `UsernsMode` when using rootless container. [docker/buildx#887](https://github.com/docker/buildx/issues/887) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.8.0). - -## 0.7.1 - -{{< release-date date="2021-08-25" >}} - -### Fixes - -- Fix issue with matching exclude rules in `.dockerignore`. [docker/buildx#858](https://github.com/docker/buildx/issues/858) -- Fix `bake --print` JSON output for current group. [docker/buildx#857](https://github.com/docker/buildx/issues/857) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.7.1). - -## 0.7.0 - -{{< release-date date="2021-11-10" >}} - -### New features - -- TLS certificates from BuildKit configuration are now transferred to build - container with `docker-container` and `kubernetes` drivers. [docker/buildx#787](https://github.com/docker/buildx/issues/787) -- Builds support `--ulimit` flag for feature parity. [docker/buildx#800](https://github.com/docker/buildx/issues/800) -- Builds support `--shm-size` flag for feature parity. 
[docker/buildx#790](https://github.com/docker/buildx/issues/790) -- Builds support `--quiet` for feature parity. [docker/buildx#740](https://github.com/docker/buildx/issues/740) -- Builds support `--cgroup-parent` flag for feature parity. [docker/buildx#814](https://github.com/docker/buildx/issues/814) -- Bake supports builtin variable `BAKE_LOCAL_PLATFORM`. [docker/buildx#748](https://github.com/docker/buildx/issues/748) -- Bake supports `x-bake` extension field in Compose files. [docker/buildx#721](https://github.com/docker/buildx/issues/721) -- `kubernetes` driver now supports colon-separated `KUBECONFIG`. [docker/buildx#761](https://github.com/docker/buildx/issues/761) -- `kubernetes` driver now supports setting Buildkit config file with `--config`. [docker/buildx#682](https://github.com/docker/buildx/issues/682) -- `kubernetes` driver now supports installing QEMU emulators with driver-opt. [docker/buildx#682](https://github.com/docker/buildx/issues/682) - -### Enhancements - -- Allow using custom registry configuration for multi-node pushes from the - client. [docker/buildx#825](https://github.com/docker/buildx/issues/825) -- Allow using custom registry configuration for `buildx imagetools` command. [docker/buildx#825](https://github.com/docker/buildx/issues/825) -- Allow booting builder after creating with `buildx create --bootstrap`. [docker/buildx#692](https://github.com/docker/buildx/issues/692) -- Allow `registry:insecure` output option for multi-node pushes. [docker/buildx#825](https://github.com/docker/buildx/issues/825) -- BuildKit config and TLS files are now kept in Buildx state directory and - reused if BuildKit instance needs to be recreated. [docker/buildx#824](https://github.com/docker/buildx/issues/824) -- Ensure different projects use separate destination directories for - incremental context transfer for better performance. [docker/buildx#817](https://github.com/docker/buildx/issues/817) -- Build containers are now placed on separate cgroup by default. [docker/buildx#782](https://github.com/docker/buildx/issues/782) -- Bake now prints the default group with `--print`. [docker/buildx#720](https://github.com/docker/buildx/issues/720) -- `docker` driver now dials build session over HTTP for better performance. [docker/buildx#804](https://github.com/docker/buildx/issues/804) - -### Fixes - -- Fix using `--iidfile` together with a multi-node push. [docker/buildx#826](https://github.com/docker/buildx/issues/826) -- Using `--push` in Bake does not clear other image export options in the file. [docker/buildx#773](https://github.com/docker/buildx/issues/773) -- Fix Git URL detection for `buildx bake` when `https` protocol was used. [docker/buildx#822](https://github.com/docker/buildx/issues/822) -- Fix pushing image with multiple names on multi-node builds. [docker/buildx#815](https://github.com/docker/buildx/issues/815) -- Avoid showing `--builder` flags for commands that don't use it. [docker/buildx#818](https://github.com/docker/buildx/issues/818) -- Unsupported build flags now show a warning. [docker/buildx#810](https://github.com/docker/buildx/issues/810) -- Fix reporting error details in some OpenTelemetry traces. [docker/buildx#812](https://github.com/docker/buildx/issues/812) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.7.0). - -## 0.6.3 - -{{< release-date date="2021-08-30" >}} - -### Fixes - -- Fix BuildKit state volume location for Windows clients. 
[docker/buildx#751](https://github.com/docker/buildx/issues/751) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.6.3). - -## 0.6.2 - -{{< release-date date="2021-08-21" >}} - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.6.2). - -### Fixes - -- Fix connection error showing up in some SSH configurations. [docker/buildx#741](https://github.com/docker/buildx/issues/741) - -## 0.6.1 - -{{< release-date date="2021-07-30" >}} - -### Enhancements - -- Set `ConfigFile` to parse compose files with Bake. [docker/buildx#704](https://github.com/docker/buildx/issues/704) - -### Fixes - -- Duplicate progress env var. [docker/buildx#693](https://github.com/docker/buildx/issues/693) -- Should ignore nil client. [docker/buildx#686](https://github.com/docker/buildx/issues/686) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.6.1). - -## 0.6.0 - -{{< release-date date="2021-07-16" >}} - -### New features - -- Support for OpenTelemetry traces and forwarding Buildx client traces to - BuildKit. [docker/buildx#635](https://github.com/docker/buildx/issues/635) -- Experimental GitHub Actions remote cache backend with `--cache-to type=gha` - and `--cache-from type=gha`. [docker/buildx#535](https://github.com/docker/buildx/issues/535) -- New `--metadata-file` flag has been added to build and Bake command that - allows saving build result metadata in JSON format. [docker/buildx#605](https://github.com/docker/buildx/issues/605) -- This is the first release supporting Windows ARM64. [docker/buildx#654](https://github.com/docker/buildx/issues/654) -- This is the first release supporting Linux Risc-V. [docker/buildx#652](https://github.com/docker/buildx/issues/652) -- Bake now supports building from remote definition with local files or - another remote source as context. [docker/buildx#671](https://github.com/docker/buildx/issues/671) -- Bake now allows variables to reference each other and using user functions - in variables and vice-versa. - [docker/buildx#575](https://github.com/docker/buildx/issues/575), - [docker/buildx#539](https://github.com/docker/buildx/issues/539), - [docker/buildx#532](https://github.com/docker/buildx/issues/532) -- Bake allows defining attributes in the global scope. [docker/buildx#541](https://github.com/docker/buildx/issues/541) -- Bake allows variables across multiple files. [docker/buildx#538](https://github.com/docker/buildx/issues/538) -- New quiet mode has been added to progress printer. [docker/buildx#558](https://github.com/docker/buildx/issues/558) -- `kubernetes` driver now supports defining resources/limits. [docker/buildx#618](https://github.com/docker/buildx/issues/618) -- Buildx binaries can now be accessed through [buildx-bin](https://hub.docker.com/r/docker/buildx-bin) - Docker image. [docker/buildx#656](https://github.com/docker/buildx/issues/656) - -### Enhancements - -- `docker-container` driver now keeps BuildKit state in volume. Enabling - updates with keeping state. [docker/buildx#672](https://github.com/docker/buildx/issues/672) -- Compose parser is now based on new [compose-go parser](https://github.com/compose-spec/compose-go) - fixing support for some newer syntax. [docker/buildx#669](https://github.com/docker/buildx/issues/669) -- SSH socket is now automatically forwarded when building an ssh-based git URL. 
[docker/buildx#581](https://github.com/docker/buildx/issues/581) -- Bake HCL parser has been rewritten. [docker/buildx#645](https://github.com/docker/buildx/issues/645) -- Extend HCL support with more functions. [docker/buildx#491](https://github.com/docker/buildx/issues/491) - [docker/buildx#503](https://github.com/docker/buildx/issues/503) -- Allow secrets from environment variables. [docker/buildx#488](https://github.com/docker/buildx/issues/488) -- Builds with an unsupported multi-platform and load configuration now fail fast. [docker/buildx#582](https://github.com/docker/buildx/issues/582) -- Store Kubernetes config file to make buildx builder switchable. [docker/buildx#497](https://github.com/docker/buildx/issues/497) -- Kubernetes now lists all pods as nodes on inspection. [docker/buildx#477](https://github.com/docker/buildx/issues/477) -- Default Rootless image has been set to `moby/buildkit:buildx-stable-1-rootless`. [docker/buildx#480](https://github.com/docker/buildx/issues/480) - -### Fixes - -- `imagetools create` command now correctly merges JSON descriptor with old one. [docker/buildx#592](https://github.com/docker/buildx/issues/592) -- Fix building with `--network=none` not requiring extra security entitlements. [docker/buildx#531](https://github.com/docker/buildx/issues/531) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.6.0). - -## 0.5.1 - -{{< release-date date="2020-12-15" >}} - -### Fixes - -- Fix regression on setting `--platform` on `buildx create` outside - `kubernetes` driver. [docker/buildx#475](https://github.com/docker/buildx/issues/475) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.5.1). - -## 0.5.0 - -{{< release-date date="2020-12-15" >}} - -### New features - -- The `docker` driver now supports the `--push` flag. [docker/buildx#442](https://github.com/docker/buildx/issues/442) -- Bake supports inline Dockerfiles. [docker/buildx#398](https://github.com/docker/buildx/issues/398) -- Bake supports building from remote URLs and Git repositories. [docker/buildx#398](https://github.com/docker/buildx/issues/398) -- `BUILDX_CONFIG` env var allow users to have separate buildx state from - Docker config. [docker/buildx#385](https://github.com/docker/buildx/issues/385) -- `BUILDKIT_MULTI_PLATFORM` build arg allows to force building multi-platform - return objects even if only one `--platform` specified. [docker/buildx#467](https://github.com/docker/buildx/issues/467) - -### Enhancements - -- Allow `--append` to be used with `kubernetes` driver. [docker/buildx#370](https://github.com/docker/buildx/issues/370) -- Build errors show error location in source files and system stacktraces - with `--debug`. [docker/buildx#389](https://github.com/docker/buildx/issues/389) -- Bake formats HCL errors with source definition. [docker/buildx#391](https://github.com/docker/buildx/issues/391) -- Bake allows empty string values in arrays that will be discarded. [docker/buildx#428](https://github.com/docker/buildx/issues/428) -- You can now use the Kubernetes cluster config with the `kubernetes` driver. [docker/buildx#368](https://github.com/docker/buildx/issues/368) - [docker/buildx#460](https://github.com/docker/buildx/issues/460) -- Creates a temporary token for pulling images instead of sharing credentials - when possible. 
[docker/buildx#469](https://github.com/docker/buildx/issues/469) -- Ensure credentials are passed when pulling BuildKit container image. [docker/buildx#441](https://github.com/docker/buildx/issues/441) - [docker/buildx#433](https://github.com/docker/buildx/issues/433) -- Disable user namespace remapping in `docker-container` driver. [docker/buildx#462](https://github.com/docker/buildx/issues/462) -- Allow `--builder` flag to switch to default instance. [docker/buildx#425](https://github.com/docker/buildx/issues/425) -- Avoid warn on empty `BUILDX_NO_DEFAULT_LOAD` config value. [docker/buildx#390](https://github.com/docker/buildx/issues/390) -- Replace error generated by `quiet` option by a warning. [docker/buildx#403](https://github.com/docker/buildx/issues/403) -- CI has been switched to GitHub Actions. - [docker/buildx#451](https://github.com/docker/buildx/issues/451), - [docker/buildx#463](https://github.com/docker/buildx/issues/463), - [docker/buildx#466](https://github.com/docker/buildx/issues/466), - [docker/buildx#468](https://github.com/docker/buildx/issues/468), - [docker/buildx#471](https://github.com/docker/buildx/issues/471) - -### Fixes - -- Handle lowercase Dockerfile name as a fallback for backward compatibility. [docker/buildx#444](https://github.com/docker/buildx/issues/444) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.5.0). - -## 0.4.2 - -{{< release-date date="2020-08-22" >}} - -### New features - -- Support `cacheonly` exporter. [docker/buildx#337](https://github.com/docker/buildx/issues/337) - -### Enhancements - -- Update `go-cty` to pull in more `stdlib` functions. [docker/buildx#277](https://github.com/docker/buildx/issues/277) -- Improve error checking on load. [docker/buildx#281](https://github.com/docker/buildx/issues/281) - -### Fixes - -- Fix parsing json config with HCL. [docker/buildx#280](https://github.com/docker/buildx/issues/280) -- Ensure `--builder` is wired from root options. [docker/buildx#321](https://github.com/docker/buildx/issues/321) -- Remove warning for multi-platform iidfile. [docker/buildx#351](https://github.com/docker/buildx/issues/351) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.4.2). - -## 0.4.1 - -{{< release-date date="2020-05-01" >}} - -### Fixes - -- Fix regression on flag parsing. [docker/buildx#268](https://github.com/docker/buildx/issues/268) -- Fix using pull and no-cache keys in HCL targets. [docker/buildx#268](https://github.com/docker/buildx/issues/268) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.4.1). - -## 0.4.0 - -{{< release-date date="2020-04-30" >}} - -### New features - -- Add `kubernetes` driver. [docker/buildx#167](https://github.com/docker/buildx/issues/167) -- New global `--builder` flag to override builder instance for a single command. [docker/buildx#246](https://github.com/docker/buildx/issues/246) -- New `prune` and `du` commands for managing local builder cache. [docker/buildx#249](https://github.com/docker/buildx/issues/249) -- You can now set the new `pull` and `no-cache` options for HCL targets. [docker/buildx#165](https://github.com/docker/buildx/issues/165) - -### Enhancements - -- Upgrade Bake to HCL2 with support for variables and functions. 
[docker/buildx#192](https://github.com/docker/buildx/issues/192) -- Bake now supports `--load` and `--push`. [docker/buildx#164](https://github.com/docker/buildx/issues/164) -- Bake now supports wildcard overrides for multiple targets. [docker/buildx#164](https://github.com/docker/buildx/issues/164) -- Container driver allows setting environment variables via `driver-opt`. [docker/buildx#170](https://github.com/docker/buildx/issues/170) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.4.0). - -## 0.3.1 - -{{< release-date date="2019-09-27" >}} - -### Enhancements - -- Handle copying unix sockets instead of erroring. [docker/buildx#155](https://github.com/docker/buildx/issues/155) - [moby/buildkit#1144](https://github.com/moby/buildkit/issues/1144) - -### Fixes - -- Running Bake with multiple Compose files now merges targets correctly. [docker/buildx#134](https://github.com/docker/buildx/issues/134) -- Fix bug when building a Dockerfile from stdin (`build -f -`). - [docker/buildx#153](https://github.com/docker/buildx/issues/153) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.3.1). - -## 0.3.0 - -{{< release-date date="2019-08-02" >}} - -### New features - -- Custom `buildkitd` daemon flags. [docker/buildx#102](https://github.com/docker/buildx/issues/102) -- Driver-specific options on `create`. [docker/buildx#122](https://github.com/docker/buildx/issues/122) - -### Enhancements - -- Environment variables are used in Compose files. [docker/buildx#117](https://github.com/docker/buildx/issues/117) -- Bake now honors `--no-cache` and `--pull`. [docker/buildx#118](https://github.com/docker/buildx/issues/118) -- Custom BuildKit config file. [docker/buildx#121](https://github.com/docker/buildx/issues/121) -- Entitlements support with `build --allow`. [docker/buildx#104](https://github.com/docker/buildx/issues/104) - -### Fixes - -- Fix bug where `--build-arg foo` would not read `foo` from environment. [docker/buildx#116](https://github.com/docker/buildx/issues/116) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.3.0). - -## 0.2.2 - -{{< release-date date="2019-05-30" >}} - -### Enhancements - -- Change Compose file handling to require valid service specifications. [docker/buildx#87](https://github.com/docker/buildx/issues/87) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.2.2). - -## 0.2.1 - -{{< release-date date="2019-05-25" >}} - -### New features - -- Add `BUILDKIT_PROGRESS` env var. [docker/buildx#69](https://github.com/docker/buildx/issues/69) -- Add `local` platform. [docker/buildx#70](https://github.com/docker/buildx/issues/70) - -### Enhancements - -- Keep arm variant if one is defined in the config. [docker/buildx#68](https://github.com/docker/buildx/issues/68) -- Make dockerfile relative to context. [docker/buildx#83](https://github.com/docker/buildx/issues/83) - -### Fixes - -- Fix parsing target from compose files. [docker/buildx#53](https://github.com/docker/buildx/issues/53) - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.2.1). 
- -## 0.2.0 - -{{< release-date date="2019-04-25" >}} - -### New features - -- First release - -For more details, see the complete release notes in the [Buildx GitHub repository](https://github.com/docker/buildx/releases/tag/v0.2.0). +params: + sidebar: + goto: "https://github.com/docker/buildx/releases" +--- \ No newline at end of file diff --git a/content/manuals/compose/_index.md b/content/manuals/compose/_index.md index 8f16cd62fed..4a4de3d1b14 100644 --- a/content/manuals/compose/_index.md +++ b/content/manuals/compose/_index.md @@ -3,12 +3,10 @@ title: Docker Compose weight: 30 description: Learn how to use Docker Compose to define and run multi-container applications with this detailed introduction to the tool. -keywords: docker compose, docker-compose, docker compose command, docker compose files, - docker compose documentation, using docker compose, compose container, docker compose - service +keywords: docker compose, docker-compose, compose.yaml, docker compose command, multi-container applications, container orchestration, docker cli params: sidebar: - group: Open source + group: Application development grid: - title: Why use Compose? description: Understand Docker Compose's key benefits @@ -30,26 +28,27 @@ grid: - title: View the release notes description: Find out about the latest enhancements and bug fixes. icon: note_add - link: /compose/release-notes + link: "https://github.com/docker/compose/releases" - title: Explore the Compose file reference description: Find information on defining services, networks, and volumes for a Docker application. icon: polyline link: /reference/compose-file +- title: Use Compose Bridge + description: Transform your Compose configuration file into configuration files for different platforms, such as Kubernetes. + icon: move_down + link: /compose/bridge - title: Browse common FAQs description: Explore general FAQs and find out how to give feedback. icon: help link: /compose/faq -- title: Migrate to Compose v2 - description: Learn how to migrate from Compose v1 to v2 - icon: folder_delete - link: /compose/releases/migrate/ aliases: - /compose/cli-command/ - /compose/networking/swarm/ - /compose/overview/ - /compose/swarm/ - /compose/completion/ +- /compose/releases/migrate/ --- Docker Compose is a tool for defining and running multi-container applications. @@ -58,12 +57,12 @@ It is the key to unlocking a streamlined and efficient development and deploymen Compose simplifies the control of your entire application stack, making it easy to manage services, networks, and volumes in a single YAML configuration file. Then, with a single command, you create and start all the services from your configuration file. -Compose works in all environments; production, staging, development, testing, as +Compose works in all environments - production, staging, development, testing, as well as CI workflows. 
It also has commands for managing the whole lifecycle of your application: - * Start, stop, and rebuild services - * View the status of running services - * Stream the log output of running services - * Run a one-off command on a service + - Start, stop, and rebuild services + - View the status of running services + - Stream the log output of running services + - Run a one-off command on a service {{< grid >}} diff --git a/content/manuals/compose/bridge/_index.md b/content/manuals/compose/bridge/_index.md index b84dcb8dba6..781a6f2cfb7 100644 --- a/content/manuals/compose/bridge/_index.md +++ b/content/manuals/compose/bridge/_index.md @@ -1,6 +1,6 @@ --- -description: Understand what Compose Bridge is and how it can be useful -keywords: compose, orchestration, kubernetes, bridge +description: Learn how Compose Bridge transforms Docker Compose files into Kubernetes manifests for seamless platform transitions +keywords: docker compose bridge, compose to kubernetes, docker compose kubernetes integration, docker compose kustomize, compose bridge docker desktop title: Overview of Compose Bridge linkTitle: Compose Bridge weight: 50 @@ -8,15 +8,20 @@ weight: 50 {{< summary-bar feature_name="Compose bridge" >}} -Compose Bridge lets you transform your Compose configuration file into configuration files for different platforms, primarily focusing on Kubernetes. The default transformation generates Kubernetes manifests and a Kustomize overlay which are designed for deployment on Docker Desktop with Kubernetes enabled. +Compose Bridge converts your Docker Compose configuration into platform-specific deployment formats such as Kubernetes manifests. By default, it generates: -It's a flexible tool that lets you either take advantage of the [default transformation](usage.md) or [create a custom transformation](customize.md) to suit specific project needs and requirements. +- Kubernetes manifests +- A Kustomize overlay -Compose Bridge significantly simplifies the transition from Docker Compose to Kubernetes, making it easier for you to leverage the power of Kubernetes while maintaining the simplicity and efficiency of Docker Compose. +These outputs are ready for deployment on Docker Desktop with [Kubernetes enabled](/manuals/desktop/settings-and-maintenance/settings.md#kubernetes). + +Compose Bridge helps you bridge the gap between Compose and Kubernetes, making it easier to adopt Kubernetes while keeping the simplicity and efficiency of Compose. + +It's a flexible tool that lets you either take advantage of the [default transformation](usage.md) or [create a custom transformation](customize.md) to suit specific project needs and requirements. ## How it works -Compose Bridge uses transformations to let you convert a Compose model into another form. +Compose Bridge uses transformations to convert a Compose model into another form. A transformation is packaged as a Docker image that receives the fully resolved Compose model as `/in/compose.yaml` and can produce any target format file under `/out`. @@ -24,22 +29,31 @@ Compose Bridge provides its own transformation for Kubernetes using Go templates For more detailed information on how these transformations work and how you can customize them for your projects, see [Customize](customize.md). -## Setup +Compose Bridge also supports applications that use LLMs via Docker Model Runner. + +For more details, see [Use Model Runner](use-model-runner.md). 
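To make the transformation contract described under "How it works" concrete, the following is a minimal, illustrative sketch of what a custom transformation image's entrypoint might do. It is not part of Compose Bridge or of this change; the only fixed parts are the `/in/compose.yaml` input and the `/out` output directory from the contract above, and it assumes the `gopkg.in/yaml.v3` parser is available.

```go
// Hypothetical entrypoint for a custom Compose Bridge transformation image.
// Compose Bridge mounts the fully resolved Compose model at /in/compose.yaml
// and collects every file the transformation writes under /out.
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"gopkg.in/yaml.v3"
)

// model captures only the fields this sketch needs from the resolved model.
type model struct {
	Services map[string]struct {
		Image string `yaml:"image"`
	} `yaml:"services"`
}

func main() {
	in, err := os.ReadFile("/in/compose.yaml")
	if err != nil {
		log.Fatalf("read resolved model: %v", err)
	}

	var m model
	if err := yaml.Unmarshal(in, &m); err != nil {
		log.Fatalf("parse model: %v", err)
	}

	// Emit one placeholder output file per service; a real transformation
	// would render full manifests for the target platform here.
	for name, svc := range m.Services {
		content := fmt.Sprintf("# generated for service %q\nimage: %s\n", name, svc.Image)
		path := filepath.Join("/out", name+".yaml")
		if err := os.WriteFile(path, []byte(content), 0o644); err != nil {
			log.Fatalf("write %s: %v", path, err)
		}
	}
}
```

Built into an image, such an entrypoint can then be selected at conversion time in place of, or alongside, the default transformation.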
+ +## Apply organizational standards at scale + + +Compose Bridge supports custom transformation templates, which lets platform teams encode +organizational standards once and apply them consistently whenever a `compose.yaml` file +is converted to Kubernetes manifests or other formats. -To get started with Compose Bridge, you need to: +Developers continue to write standard Compose files. During conversion, Compose Bridge +runs your custom transformation and automatically injects the required security contexts, +resource limits, labels, and network policies into the output manifests — without +requiring developers to know or manage those details. -1. Download and install Docker Desktop version 4.33 and later. -2. Sign in to your Docker account. -3. Navigate to the **Features in development** tab in **Settings**. -4. From the **Experimental features** tab, select **Enable Compose Bridge**. -5. Select **Apply & restart**. +When your requirements change, update the transformation template in one place. Every team +picks up the changes on their next conversion, with no edits to individual Compose files. -## Feedback +This separation of concerns keeps developers focused on application configuration, while +platform teams control governance and enforce policy through the transformation layer. -To give feedback, report bugs, or receive support, email `desktop-preview@docker.com`. There is also a dedicated Slack channel. To join, simply send an email to the provided address. +To get started, see [Customize Compose Bridge](/manuals/compose/bridge/customize.md). ## What's next? - [Use Compose Bridge](usage.md) - [Explore how you can customize Compose Bridge](customize.md) -- [Explore the advanced integration](advanced-integration.md) diff --git a/content/manuals/compose/bridge/advanced-integration.md b/content/manuals/compose/bridge/advanced-integration.md deleted file mode 100644 index db9e7183754..00000000000 --- a/content/manuals/compose/bridge/advanced-integration.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Advanced integration -linkTitle: Advanced -weight: 30 -description: Learn about how Compose Bridge can function a kubectl plugin -keywords: kubernetes, compose, compose bridge, plugin, advanced ---- - -{{< summary-bar feature_name="Compose bridge" >}} - -Compose Bridge can also function as a `kubectl` plugin, allowing you to integrate its capabilities directly into your Kubernetes command-line operations. This integration simplifies the process of converting and deploying applications from Docker Compose to Kubernetes. - -## Use `compose-bridge` as a `kubectl` plugin - -To use the `compose-bridge` binary as a `kubectl` plugin, you need to make sure that the binary is available in your PATH and the name of the binary is prefixed with `kubectl-`. - -1. Rename or copy the `compose-bridge` binary to `kubectl-compose_bridge`: - - ```console - $ mv /path/to/compose-bridge /usr/local/bin/kubectl-compose_bridge - ``` - -2. Ensure that the binary is executable: - - ```console - $ chmod +x /usr/local/bin/kubectl-compose_bridge - ``` - -3. Verify that the plugin is recognized by `kubectl`: - - ```console - $ kubectl plugin list - ``` - - In the output, you should see `kubectl-compose_bridge`. - -4. Now you can use `compose-bridge` as a `kubectl` plugin: - - ```console - $ kubectl compose-bridge [command] - ``` - -Replace `[command]` with any `compose-bridge` command you want to use. 
diff --git a/content/manuals/compose/bridge/customize.md b/content/manuals/compose/bridge/customize.md index d978ecf6b9f..3834e65939b 100644 --- a/content/manuals/compose/bridge/customize.md +++ b/content/manuals/compose/bridge/customize.md @@ -2,13 +2,15 @@ title: Customize Compose Bridge linkTitle: Customize weight: 20 -description: Learn about the Compose Bridge templates syntax -keywords: compose, bridge, templates +description: Learn how to customize Compose Bridge transformations using Go templates and Compose extensions +keywords: docker compose bridge, customize compose bridge, compose bridge templates, compose to kubernetes, compose bridge transformation, go templates docker --- {{< summary-bar feature_name="Compose bridge" >}} -This page explains how Compose Bridge utilizes templating to efficiently translate Docker Compose files into Kubernetes manifests. It also explain how you can customize these templates for your specific requirements and needs, or how you can build your own transformation. +You can customize how Compose Bridge converts your Docker Compose files into platform-specific formats. + +This page explains how Compose Bridge uses templating to generate Kubernetes manifests and how you can customize these templates for your specific requirements and needs, or how you can build your own transformation. ## How it works @@ -16,11 +18,11 @@ Compose bridge uses transformations to let you convert a Compose model into anot A transformation is packaged as a Docker image that receives the fully-resolved Compose model as `/in/compose.yaml` and can produce any target format file under `/out`. -Compose Bridge provides its transformation for Kubernetes using Go templates, so that it is easy to extend for customization by just replacing or appending your own templates. +Compose Bridge includes a default Kubernetes transformation using Go templates, which you can customize by replacing or extending templates. -### Syntax +### Template syntax -Compose Bridge make use of templates to transform a Compose configuration file into Kubernetes manifests. Templates are plain text files that use the [Go templating syntax](https://pkg.go.dev/text/template). This enables the insertion of logic and data, making the templates dynamic and adaptable according to the Compose model. +Compose Bridge makes use of templates to transform a Compose configuration file into Kubernetes manifests. Templates are plain text files that use the [Go templating syntax](https://pkg.go.dev/text/template). This enables the insertion of logic and data, making the templates dynamic and adaptable according to the Compose model. When a template is executed, it must produce a YAML file which is the standard format for Kubernetes manifests. Multiple files can be generated as long as they are separated by `---` @@ -42,9 +44,11 @@ key: value {{ end }} ``` -### Input +### Input model + +You can generate the input model by running `docker compose config`. -The input Compose model is the canonical YAML model you can get by running `docker compose config`. Within the templates, data from the `compose.yaml` is accessed using dot notation, allowing you to navigate through nested data structures. For example, to access the deployment mode of a service, you would use `service.deploy.mode`: +This canonical YAML output serves as the input for Compose Bridge transformations. Within the templates, data from the `compose.yaml` is accessed using dot notation, allowing you to navigate through nested data structures. 
For example, to access the deployment mode of a service, you would use `service.deploy.mode`: ```yaml # iterate over a yaml sequence {{ range $name, $service := .services }} # access a nested attribute using dot notation {{ if eq $service.deploy.mode "global" }} kind: DaemonSet - {{ end }} -{{ end }} + {{ end }} +{{ end }} ``` -You can check the [Compose Specification JSON schema](https://github.com/compose-spec/compose-go/blob/main/schema/compose-spec.json) to have a full overview of the Compose model. This schema outlines all possible configurations and their data types in the Compose model. +You can check the [Compose Specification JSON schema](https://github.com/compose-spec/compose-go/blob/main/schema/compose-spec.json) for a full overview of the Compose model. This schema outlines all possible configurations and their data types in the Compose model. -### Helpers +### Helper functions As part of the Go templating syntax, Compose Bridge offers a set of YAML helper functions designed to manipulate data within the templates efficiently: -- `seconds`: Converts a [duration](/reference/compose-file/extension.md#specifying-durations) into an integer -- `uppercase`: Converts a string into upper case characters -- `title`: Converts a string by capitalizing the first letter of each word -- `safe`: Converts a string into a safe identifier, replacing all characters (except lowercase a-z) with `-` -- `truncate`: Removes the N first elements from a list -- `join`: Groups elements from a list into a single string, using a separator -- `base64`: Encodes a string as base64 used in Kubernetes for encoding secrets -- `map`: Transforms a value according to mappings expressed as `"value -> newValue"` strings -- `indent`: Writes string content indented by N spaces -- `helmValue`: Writes the string content as a template value in the final file +| Function | Description | +| ----------- | ----------------------------------------------------------------------------------------------------------- | +| `seconds` | Converts a [duration](/reference/compose-file/extension.md#specifying-durations) into an integer (seconds). | +| `uppercase` | Converts a string to uppercase. | +| `title` | Capitalizes the first letter of each word. | +| `safe` | Converts a string into a safe identifier (replaces non-lowercase characters with `-`). | +| `truncate` | Removes the first N elements from a list. | +| `join` | Joins list elements into a single string with a separator. | +| `base64` | Encodes a string as base64 (used for Kubernetes secrets). | +| `map` | Maps values using `"value -> newValue"` syntax. | +| `indent` | Indents string content by N spaces. | +| `helmValue` | Outputs a Helm-style template value. | In the following example, the template checks if a healthcheck interval is specified for a service, applies the `seconds` function to convert this interval into seconds and assigns the value to the `periodSeconds` attribute. ```yaml {{ if $service.healthcheck.interval }} periodSeconds: {{ $service.healthcheck.interval | seconds }} {{ end }} ``` -## Customization +## Customize the default templates As Kubernetes is a versatile platform, there are many ways to map Compose concepts into Kubernetes resource definitions. Compose Bridge lets you customize the transformation to match your own infrastructure -decisions and preferences, with various level of flexibility and effort. +decisions and preferences, with varying levels of flexibility and effort.
### Modify the default templates -You can extract templates used by the default transformation `docker/compose-bridge-kubernetes`, -by running `compose-bridge transformations create --from docker/compose-bridge-kubernetes my-template` -and adjusting the templates to match your needs. +You can extract templates used by the default transformation `docker/compose-bridge-kubernetes`: + +```console +$ docker compose bridge transformations create --from docker/compose-bridge-kubernetes my-template +``` + +The templates are extracted into a directory named after your template name, in this case `my-template`. It includes: + +- A Dockerfile that lets you create your own image to distribute your template +- A directory containing the templating files + +Edit, [add](#add-your-own-templates), or remove templates as needed. -The templates are extracted into a directory named after your template name, in this case `my-template`. -It includes a Dockerfile that lets you create your own image to distribute your template, as well as a directory containing the templating files. -You are free to edit the existing files, delete them, or [add new ones](#add-your-own-templates) to subsequently generate Kubernetes manifests that meet your needs. You can then use the generated Dockerfile to package your changes into a new transformation image, which you can then use with Compose Bridge: ```console $ docker build --tag mycompany/transform --push . ``` -You can then use your transformation as a replacement: +Use your transformation as a replacement: ```console -$ compose-bridge convert --transformations mycompany/transform +$ docker compose bridge convert --transformations mycompany/transform ``` +#### Model Runner templates + +The default transformation also includes templates for applications that use LLMs: + +- `model-runner-deployment.tmpl`: Generates the Kubernetes deployment for Docker Model Runner. Customize it to change replica counts, image tags, resource requests and limits, GPU scheduling settings, tolerations, or additional environment variables. +- `model-runner-service.tmpl`: Builds the service that exposes Docker Model Runner. Update it to switch between `ClusterIP`, `NodePort`, or `LoadBalancer` types, adjust ports, or add annotations for ingress and service meshes. +- `model-runner-pvc.tmpl`: Defines the persistent volume claim used to store downloaded models. Edit it to set storage size, storage class, access modes, or volume annotations required by your storage provider. +- `/overlays/model-runner/kustomization.yaml`: Kustomize overlay applied when you deploy Model Runner to a standalone Kubernetes cluster. Extend it to add patches for labels and annotations, attach `NetworkPolicies`, or include extra manifests. +- `/overlays/desktop/deployment.tmpl`: Desktop-specific deployment template that keeps the in-cluster Model Runner scaled down and points workloads to the host endpoint. Adjust it if you change the Desktop endpoint or want to deploy Model Runner on Desktop instead of relying on the host service. + +Common customization scenarios: + +- Enable GPU support by adding vendor-specific resource requests, limits, and node selectors in `model-runner-deployment.tmpl`. +- Increase or tune storage for model artifacts by editing `model-runner-pvc.tmpl` to set the desired size, storage class, or access mode. +- Expose Model Runner outside the cluster by switching the service type in `model-runner-service.tmpl` or adding ingress annotations in the model-runner overlay. 
+- Align cluster policies by adding labels, annotations, or NetworkPolicies through `/overlays/model-runner/kustomization.yaml`. + +For more details, see [Use Model Runner](use-model-runner.md). + ### Add your own templates For resources that are not managed by Compose Bridge's default transformation, -you can build your own templates. The `compose.yaml` model may not offer all +you can build your own templates. + +The `compose.yaml` model may not offer all the configuration attributes required to populate the target manifest. If this is the case, you can then rely on Compose custom extensions to better describe the application, and offer an agnostic transformation. @@ -152,7 +185,7 @@ when transforming Compose models into Kubernetes in addition to other transformations: ```console -$ compose-bridge convert \ +$ docker compose bridge convert \ --transformation docker/compose-bridge-kubernetes \ --transformation mycompany/transform ``` @@ -184,7 +217,3 @@ CMD ["/usr/bin/kompose", "convert", "-f", "/in/compose.yaml", "--out", "/out"] This Dockerfile bundles Kompose and defines the command to run this tool according to the Compose Bridge transformation contract. - -## What's next? - -- [Explore the advanced integration](advanced-integration.md) diff --git a/content/manuals/compose/bridge/usage.md b/content/manuals/compose/bridge/usage.md index 091457fbeef..1f25bc2dcc3 100644 --- a/content/manuals/compose/bridge/usage.md +++ b/content/manuals/compose/bridge/usage.md @@ -2,13 +2,15 @@ title: Use the default Compose Bridge transformation linkTitle: Usage weight: 10 -description: Learn about and use the Compose Bridge default transformation -keywords: compose, bridge, kubernetes +description: Learn how to use the default Compose Bridge transformation to convert Compose files into Kubernetes manifests +keywords: docker compose bridge, compose kubernetes transform, kubernetes from compose, compose bridge convert, compose.yaml to kubernetes --- {{< summary-bar feature_name="Compose bridge" >}} -Compose Bridge supplies an out-of-the box transformation for your Compose configuration file. Based on an arbitrary `compose.yaml` file, Compose Bridge produces: +Compose Bridge includes a built-in transformation that automatically converts your Compose configuration into a set of Kubernetes manifests. + +Based on your `compose.yaml` file, it produces: - A [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) so all your resources are isolated and don't conflict with resources from other deployments. - A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) with an entry for each and every [config](/reference/compose-file/configs.md) resource in your Compose application. @@ -22,69 +24,97 @@ Compose Bridge supplies an out-of-the box transformation for your Compose config It also supplies a Kustomize overlay dedicated to Docker Desktop with: - `Loadbalancer` for services which need to expose ports on host. - A `PersistentVolumeClaim` to use the Docker Desktop storage provisioner `desktop-storage-provisioner` to handle volume provisioning more effectively. - - A Kustomize file to link all the resources together. + - A `Kustomization.yaml` file to link all the resources together. + +If your Compose file defines a `models` section for a service, Compose Bridge automatically configures your deployment so your service can locate and use its models via Docker Model Runner. 
+ +For each declared model, the transformation injects two environment variables: + +- `_URL`: The endpoint for Docker Model Runner serving that model +- `_MODEL`: The model’s name or identifier + +You can optionally customize these variable names using `endpoint_var` and `model_var`. + +The default transformation generates two different overlays - one for Docker Desktop whilst using a local instance of Docker Model Runner, and a `model-runner` overlay with all the relevant Kubernetes resources to deploy Docker Model Runner in a pod. + +| Environment | Endpoint | +| -------------- | ----------------------------------------------- | +| Docker Desktop | `http://host.docker.internal:12434/engines/v1/` | +| Kubernetes | `http://model-runner/engines/v1/` | + + +For more details, see [Use Model Runner](use-model-runner.md). ## Use the default Compose Bridge transformation -To use the default transformation run the following command: +To convert your Compose file using the default transformation: ```console -$ compose-bridge convert +$ docker compose bridge convert ``` -Compose looks for a `compose.yaml` file inside the current directory and then converts it. +Compose looks for a `compose.yaml` file inside the current directory and generates Kubernetes manifests. + +Example output: -The following output is displayed ```console -$ compose-bridge convert -f compose.yaml -Kubernetes resource api-deployment.yaml created -Kubernetes resource db-deployment.yaml created -Kubernetes resource web-deployment.yaml created -Kubernetes resource api-expose.yaml created -Kubernetes resource db-expose.yaml created -Kubernetes resource web-expose.yaml created -Kubernetes resource 0-avatars-namespace.yaml created +$ docker compose -f compose.yaml bridge convert +Kubernetes resource backend-deployment.yaml created +Kubernetes resource frontend-deployment.yaml created +Kubernetes resource backend-expose.yaml created +Kubernetes resource frontend-expose.yaml created +Kubernetes resource 0-my-project-namespace.yaml created Kubernetes resource default-network-policy.yaml created -Kubernetes resource private-network-policy.yaml created -Kubernetes resource public-network-policy.yaml created -Kubernetes resource db-db_data-persistentVolumeClaim.yaml created -Kubernetes resource api-service.yaml created -Kubernetes resource web-service.yaml created +Kubernetes resource backend-service.yaml created +Kubernetes resource frontend-service.yaml created Kubernetes resource kustomization.yaml created -Kubernetes resource db-db_data-persistentVolumeClaim.yaml created -Kubernetes resource api-service.yaml created -Kubernetes resource web-service.yaml created +Kubernetes resource backend-deployment.yaml created +Kubernetes resource frontend-deployment.yaml created +Kubernetes resource backend-service.yaml created +Kubernetes resource frontend-service.yaml created +Kubernetes resource kustomization.yaml created +Kubernetes resource model-runner-configmap.yaml created +Kubernetes resource model-runner-deployment.yaml created +Kubernetes resource model-runner-service.yaml created +Kubernetes resource model-runner-volume-claim.yaml created Kubernetes resource kustomization.yaml created ``` -These files are then stored within your project in the `/out` folder. +All generated files are stored in the `/out` directory in your project. -The Kubernetes manifests can then be used to run the application on Kubernetes using -the standard deployment command `kubectl apply -k out/overlays/desktop/`. 
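As a hedged illustration of how a service container might consume the model variables described earlier on this page, here is a short Go sketch. The variable names `CHAT_MODEL_URL` and `CHAT_MODEL_MODEL` are hypothetical placeholders for whatever prefix your model declaration (or `endpoint_var`/`model_var`) actually produces, and the sketch assumes the endpoint is OpenAI-compatible, as the `/engines/v1/` paths above suggest.

```go
// Hypothetical consumer of the environment variables Compose Bridge injects
// for a declared model; the CHAT_MODEL_* names are illustrative only.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"strings"
)

func main() {
	endpoint := os.Getenv("CHAT_MODEL_URL") // e.g. http://model-runner/engines/v1/
	model := os.Getenv("CHAT_MODEL_MODEL")  // e.g. ai/qwen2.5:latest
	if endpoint == "" || model == "" {
		log.Fatal("model endpoint or model name not configured")
	}

	// Minimal OpenAI-style chat completion request against the injected endpoint.
	body, err := json.Marshal(map[string]any{
		"model": model,
		"messages": []map[string]string{
			{"role": "user", "content": "Say hello"},
		},
	})
	if err != nil {
		log.Fatalf("encode request: %v", err)
	}

	url := strings.TrimSuffix(endpoint, "/") + "/chat/completions"
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatalf("call model endpoint: %v", err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```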
+ +## Deploy the generated manifests -> [!NOTE] +> [!IMPORTANT] > -> Make sure you have enabled Kubernetes in Docker Desktop before you deploy your Compose Bridge transformations. - -If you want to convert a `compose.yaml` file that is located in another directory, you can run: - -```console -$ compose-bridge convert -f /compose.yaml -``` +> Before you deploy your Compose Bridge transformations, make sure you have [enabled Kubernetes](/manuals/desktop/settings-and-maintenance/settings.md#kubernetes) in Docker Desktop. -To see all available flags, run: +Once the manifests are generated, deploy them to your local Kubernetes cluster: ```console -$ compose-bridge convert --help +$ kubectl apply -k out/overlays/desktop/ ``` > [!TIP] > -> You can now convert and deploy your Compose project to a Kubernetes cluster from the Compose file viewer. +> You can convert and deploy your Compose project to a Kubernetes cluster from the Compose file viewer. > > Make sure you are signed in to your Docker account, navigate to your container in the **Containers** view, and in the top-right corner select **View configurations** and then **Convert and Deploy to Kubernetes**. +## Additional commands + +Convert a `compose.yaml` file located in another directory: + +```console +$ docker compose -f /compose.yaml bridge convert +``` + +To see all available flags, run: + +```console +$ docker compose bridge convert --help +``` + ## What's next? - [Explore how you can customize Compose Bridge](customize.md) -- [Explore the advanced integration](advanced-integration.md) +- [Use Model Runner](use-model-runner.md) \ No newline at end of file diff --git a/content/manuals/compose/bridge/use-model-runner.md b/content/manuals/compose/bridge/use-model-runner.md new file mode 100644 index 00000000000..1f976c8b4ae --- /dev/null +++ b/content/manuals/compose/bridge/use-model-runner.md @@ -0,0 +1,91 @@ +--- +title: Use Docker Model Runner with Compose Bridge +linkTitle: Use Model Runner +weight: 30 +description: How to use Docker Model Runner with Compose Bridge for consistent deployments +keywords: docker compose bridge, customize compose bridge, compose bridge templates, compose to kubernetes, compose bridge transformation, go templates docker, model runner, ai, llms +--- + +Compose Bridge supports model-aware deployments. It can deploy and configure Docker Model Runner, a lightweight service that hosts and serves machine learning models. + +This reduces manual setup for LLM-enabled services and keeps deployments consistent across Docker Desktop and Kubernetes environments. + +If you have a `models` top-level element in your `compose.yaml` file, Compose Bridge: + +- Automatically injects environment variables for each model’s endpoint and name. +- Configures model endpoints differently for Docker Desktop and Kubernetes. +- Optionally deploys Docker Model Runner in Kubernetes when enabled in Helm values. + +## Configure model runner settings + +When deploying using the generated Helm charts, you can control the model runner configuration through Helm values.
+ +```yaml +# Model Runner settings +modelRunner: + # Set to false for Docker Desktop (uses host instance) + # Set to true for standalone Kubernetes clusters + enabled: false + # Endpoint used when enabled=false (Docker Desktop) + hostEndpoint: "http://host.docker.internal:12434/engines/v1/" + # Deployment settings when enabled=true + image: "docker/model-runner:latest" + imagePullPolicy: "IfNotPresent" + # GPU support + gpu: + enabled: false + vendor: "nvidia" # nvidia or amd + count: 1 + # Node scheduling (uncomment and customize as needed) + # nodeSelector: + # accelerator: nvidia-tesla-t4 + # tolerations: [] + # affinity: {} + + # Security context + securityContext: + allowPrivilegeEscalation: false + # Environment variables (uncomment and add as needed) + # env: + # DMR_ORIGINS: "http://localhost:31246" + resources: + limits: + cpu: "1000m" + memory: "2Gi" + requests: + cpu: "100m" + memory: "256Mi" + # Storage for models + storage: + size: "100Gi" + storageClass: "" # Empty uses default storage class + # Models to pre-pull + models: + - ai/qwen2.5:latest + - ai/mxbai-embed-large +``` + +## Deploying a model runner + +### Docker Desktop + +When `modelRunner.enabled` is `false`, Compose Bridge configures your workloads to connect to Docker Model Runner running on the host: + +```text +http://host.docker.internal:12434/engines/v1/ +``` + +The endpoint is automatically injected into your service containers. + +### Kubernetes + +When `modelRunner.enabled` is `true`, Compose Bridge uses the generated manifests to deploy Docker Model Runner in your cluster, including: + +- Deployment: Runs the `docker-model-runner` container +- Service: Exposes port `80` (maps to container port `12434`) +- `PersistentVolumeClaim`: Stores model files + +The `modelRunner.enabled` setting also determines the number of replicas for the `model-runner-deployment`: + +- When `true`, the deployment replica count is set to 1, and Docker Model Runner is deployed in the Kubernetes cluster. +- When `false`, the replica count is 0, and no Docker Model Runner resources are deployed. \ No newline at end of file diff --git a/content/manuals/compose/compose-sdk.md b/content/manuals/compose/compose-sdk.md new file mode 100644 index 00000000000..09ac2335ad3 --- /dev/null +++ b/content/manuals/compose/compose-sdk.md @@ -0,0 +1,182 @@ +--- +description: Integrate Docker Compose directly into your applications with the Compose SDK. +keywords: docker compose sdk, compose api, Docker developer SDK +title: Using the Compose SDK +linkTitle: Compose SDK +weight: 60 +params: + sidebar: + badge: + color: green + text: New +--- + +{{< summary-bar feature_name="Compose SDK" >}} + +The `docker/compose` package can be used as a Go library by third-party applications to programmatically manage +containerized applications defined in Compose files. This SDK provides a comprehensive API that lets you +integrate Compose functionality directly into your applications, allowing you to load, validate, and manage +multi-container environments without relying on the Compose CLI. + +Whether you need to orchestrate containers as part of +a deployment pipeline, build custom management tools, or embed container orchestration into your application, the +Compose SDK offers the same powerful capabilities that drive the Docker Compose command-line tool. 
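If you want to experiment with the SDK in your own Go project, you can add the module first. The module path below is an assumption based on the import paths shown later on this page; adjust it if your Compose version differs:

```console
$ go get github.com/docker/compose/v5
```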
+ +## Set up the SDK + +To get started, create an SDK instance using the `NewComposeService()` function, which initializes a service with the +necessary configuration to interact with the Docker daemon and manage Compose projects. This service instance provides +methods for all core Compose operations including creating, starting, stopping, and removing containers, as well as +loading and validating Compose files. The service handles the underlying Docker API interactions and resource +management, allowing you to focus on your application logic. + +### Requirements + +Before using the SDK, make sure you're using a compatible version of the Docker CLI. + +```go +require ( + github.com/docker/cli v28.5.2+incompatible +) +``` + +Docker CLI version 29.0.0 and later depends on the new `github.com/moby/moby` module, whereas Docker Compose v5 currently depends on `github.com/docker/docker`. This means you need to pin `docker/cli v28.5.2+incompatible` to ensure compatibility and avoid build errors. + +### Example usage + +Here's a basic example demonstrating how to load a Compose project and start the services: + +```go +package main + +import ( + "context" + "log" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/flags" + "github.com/docker/compose/v5/pkg/api" + "github.com/docker/compose/v5/pkg/compose" +) + +func main() { + ctx := context.Background() + + dockerCLI, err := command.NewDockerCli() + if err != nil { + log.Fatalf("Failed to create docker CLI: %v", err) + } + err = dockerCLI.Initialize(&flags.ClientOptions{}) + if err != nil { + log.Fatalf("Failed to initialize docker CLI: %v", err) + } + + // Create a new Compose service instance + service, err := compose.NewComposeService(dockerCLI) + if err != nil { + log.Fatalf("Failed to create compose service: %v", err) + } + + // Load the Compose project from a compose file + project, err := service.LoadProject(ctx, api.ProjectLoadOptions{ + ConfigPaths: []string{"compose.yaml"}, + ProjectName: "my-app", + }) + if err != nil { + log.Fatalf("Failed to load project: %v", err) + } + + // Start the services defined in the Compose file + err = service.Up(ctx, project, api.UpOptions{ + Create: api.CreateOptions{}, + Start: api.StartOptions{}, + }) + if err != nil { + log.Fatalf("Failed to start services: %v", err) + } + + log.Printf("Successfully started project: %s", project.Name) +} +``` + +This example demonstrates the core workflow - creating a service instance, loading a project from a Compose file, and +starting the services. The SDK provides many additional operations for managing the lifecycle of your containerized +application. + +## Customizing the SDK + +The `NewComposeService()` function accepts optional `compose.Option` parameters to customize the SDK behavior. These +options allow you to configure I/O streams, concurrency limits, dry-run mode, and other advanced features. 
+ +```go + // Create a custom output buffer to capture logs + var outputBuffer bytes.Buffer + + // Create a compose service with custom options + service, err := compose.NewComposeService(dockerCLI, + compose.WithOutputStream(&outputBuffer), // Redirect output to custom writer + compose.WithErrorStream(os.Stderr), // Use stderr for errors + compose.WithMaxConcurrency(4), // Limit concurrent operations + compose.WithPrompt(compose.AlwaysOkPrompt()), // Auto-confirm all prompts + ) +``` + +### Available options + +- `WithOutputStream(io.Writer)`: Redirect standard output to a custom writer +- `WithErrorStream(io.Writer)`: Redirect error output to a custom writer +- `WithInputStream(io.Reader)`: Provide a custom input stream for interactive prompts +- `WithStreams(out, err, in)`: Set all I/O streams at once +- `WithMaxConcurrency(int)`: Limit the number of concurrent operations against the Docker API +- `WithPrompt(Prompt)`: Customize user confirmation behavior (use `AlwaysOkPrompt()` for non-interactive mode) +- `WithDryRun`: Run operations in dry-run mode without actually applying changes +- `WithContextInfo(api.ContextInfo)`: Set custom Docker context information +- `WithProxyConfig(map[string]string)`: Configure HTTP proxy settings for builds +- `WithEventProcessor(progress.EventProcessor)`: Receive progress events and operation notifications + +These options provide fine-grained control over the SDK's behavior, making it suitable for various integration +scenarios including CLI tools, web services, automation scripts, and testing environments. + +## Tracking operations with `EventProcessor` + +The `EventProcessor` interface allows you to monitor Compose operations in real-time by receiving events about changes +applied to Docker resources such as images, containers, volumes, and networks. This is particularly useful for building +user interfaces, logging systems, or monitoring tools that need to track the progress of Compose operations. + +### Understanding `EventProcessor` + +A Compose operation, such as `up`, `down`, `build`, performs a series of changes to Docker resources. The +`EventProcessor` receives notifications about these changes through three key methods: + +- `Start(ctx, operation)`: Called when a Compose operation begins, for example `up` +- `On(events...)`: Called with progress events for individual resource changes, for example, container starting, image + being pulled +- `Done(operation, success)`: Called when the operation completes, indicating success or failure + +Each event contains information about the resource being modified, its current status, and progress indicators when +applicable (such as download progress for image pulls). + +### Event status types + +Events report resource changes with the following status types: + +- Working: Operation is in progress, for example, creating, starting, pulling +- Done: Operation completed successfully +- Warning: Operation completed with warnings +- Error: Operation failed + +Common status text values include: `Creating`, `Created`, `Starting`, `Started`, `Running`, `Stopping`, `Stopped`, +`Removing`, `Removed`, `Building`, `Built`, `Pulling`, `Pulled`, and more. 
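As a concrete illustration, the following sketch enables plain-text progress output instead of the default quiet behavior by passing one of the ready-made processors described in the next section. It assumes the same `dockerCLI` setup as the earlier example; a custom type implementing the three methods above can be plugged in the same way.

```go
// Sketch: report progress events as plain text on stdout instead of discarding them.
// Assumes dockerCLI was created and initialized as in the setup example, and that
// the progress package is imported from the same compose module as pkg/compose.
service, err := compose.NewComposeService(dockerCLI,
	compose.WithEventProcessor(progress.NewPlainWriter(os.Stdout)),
)
if err != nil {
	log.Fatalf("Failed to create compose service: %v", err)
}
```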
+ +### Built-in `EventProcessor` implementations + +The SDK provides three ready-to-use `EventProcessor` implementations: + +- `progress.NewTTYWriter(io.Writer)`: Renders an interactive terminal UI with progress bars and task lists + (similar to the Docker Compose CLI output) +- `progress.NewPlainWriter(io.Writer)`: Outputs simple text-based progress messages suitable for non-interactive + environments or log files +- `progress.NewJSONWriter()`: Render events as JSON objects +- `progress.NewQuietWriter()`: (Default) Silently processes events without producing any output + +Using `EventProcessor`, a custom UI can be plugged into `docker/compose`. \ No newline at end of file diff --git a/content/manuals/compose/gettingstarted.md b/content/manuals/compose/gettingstarted.md index 70edc888e4a..8f5bcb071be 100644 --- a/content/manuals/compose/gettingstarted.md +++ b/content/manuals/compose/gettingstarted.md @@ -1,5 +1,5 @@ --- -description: Check out this tutorial on how to use Docker Compose from defining application +description: Follow this hands-on tutorial to learn how to use Docker Compose from defining application dependencies to experimenting with commands. keywords: docker compose example, docker compose tutorial, how to use docker compose, running docker compose, how to run docker compose, docker compose build image, docker @@ -8,15 +8,14 @@ keywords: docker compose example, docker compose tutorial, how to use docker com title: Docker Compose Quickstart linkTitle: Quickstart weight: 30 +aliases: +- /compose/samples-for-compose/ +- /compose/support-and-feedback/samples-for-compose/ --- This tutorial aims to introduce fundamental concepts of Docker Compose by guiding you through the development of a basic Python web application. -Using the Flask framework, the application features a hit counter in Redis, providing a practical example of how Docker Compose can be applied in web development scenarios. - -The concepts demonstrated here should be understandable even if you're not familiar with Python. - -This is a non-normative example that demonstrates core Compose functionality. +Using the Flask framework, the application features a hit counter in Redis, providing a practical example of how Docker Compose can be applied in web development scenarios. The concepts demonstrated here should be understandable even if you're not familiar with Python. ## Prerequisites @@ -25,330 +24,506 @@ Make sure you have: - [Installed the latest version of Docker Compose](/manuals/compose/install/_index.md) - A basic understanding of Docker concepts and how Docker works -## Step 1: Set up +## Step 1: Set up the project 1. Create a directory for the project: ```console - $ mkdir composetest - $ cd composetest + $ mkdir compose-demo + $ cd compose-demo ``` -2. Create a file called `app.py` in your project directory and paste the following code in: +2. Create `app.py` in your project directory and add the following: ```python - import time - + import os import redis from flask import Flask app = Flask(__name__) - cache = redis.Redis(host='redis', port=6379) - - def get_hit_count(): - retries = 5 - while True: - try: - return cache.incr('hits') - except redis.exceptions.ConnectionError as exc: - if retries == 0: - raise exc - retries -= 1 - time.sleep(0.5) - - @app.route('/') - def hello(): - count = get_hit_count() - return f'Hello World! 
I have been seen {count} times.\n' - ``` + cache = redis.Redis( + host=os.getenv("REDIS_HOST", "redis"), + port=int(os.getenv("REDIS_PORT", "6379")), + ) - In this example, `redis` is the hostname of the redis container on the - application's network and the default port, `6379` is used. + @app.route("/") + def hello(): + count = cache.incr("hits") + return f"Hello from Docker! I have been seen {count} time(s).\n" + ``` - > [!NOTE] - > - > Note the way the `get_hit_count` function is written. This basic retry - > loop attempts the request multiple times if the Redis service is - > not available. This is useful at startup while the application comes - > online, but also makes the application more resilient if the Redis - > service needs to be restarted anytime during the app's lifetime. In a - > cluster, this also helps handling momentary connection drops between - > nodes. + The app reads its Redis connection details from environment variables, with sensible defaults so it works out of the box. -3. Create another file called `requirements.txt` in your project directory and - paste the following code in: +3. Create `requirements.txt` in your project directory and add the following: ```text flask redis ``` -4. Create a `Dockerfile` and paste the following code in: +4. Create a `Dockerfile`: ```dockerfile # syntax=docker/dockerfile:1 - FROM python:3.10-alpine - WORKDIR /code - ENV FLASK_APP=app.py
+   # Build on the Python 3.12 Alpine base image
+   FROM python:3.12-alpine
+   # Set the working directory to /code
+   WORKDIR /code
+   # Set environment variables used by the flask command
+   ENV FLASK_APP=app.py
    ENV FLASK_RUN_HOST=0.0.0.0 - RUN apk add --no-cache gcc musl-dev linux-headers - COPY requirements.txt requirements.txt - RUN pip install -r requirements.txt
+   # Install gcc and other build dependencies
+   RUN apk add --no-cache gcc musl-dev linux-headers
+   # Copy requirements.txt and install the Python dependencies
+   COPY requirements.txt .
+   RUN pip install -r requirements.txt
+   # Copy the current directory . in the project to the workdir . in the image
+   COPY . .
    EXPOSE 5000 - COPY . . - CMD ["flask", "run", "--debug"]
+   # Set the default command for the container to flask run --debug
+   CMD ["flask", "run", "--debug"]
    ``` - {{< accordion title="Understand the Dockerfile" >}} + > [!IMPORTANT] + > + > Make sure the file is named `Dockerfile` with no extension. Some editors add `.txt` + > automatically, which causes the build to fail. - This tells Docker to: + For more information on how to write Dockerfiles, see the [Dockerfile reference](/reference/dockerfile/). - * Build an image starting with the Python 3.10 image. - * Set the working directory to `/code`. - * Set environment variables used by the `flask` command. - * Install gcc and other dependencies - * Copy `requirements.txt` and install the Python dependencies. - * Add metadata to the image to describe that the container is listening on port 5000 - * Copy the current directory `.` in the project to the workdir `.` in the image. - * Set the default command for the container to `flask run --debug`. +5. Create a `.env` file to hold configuration values: - {{< /accordion >}} + ```text + APP_PORT=8000 + REDIS_HOST=redis + REDIS_PORT=6379 + ``` - > [!IMPORTANT] - > - >Check that the `Dockerfile` has no file extension like `.txt`. Some editors may append this file extension automatically which results in an error when you run the application.
+ Compose automatically reads `.env` and makes these values available for interpolation + in your `compose.yaml`. For this example the gains are modest, but in practice, + keeping configuration out of the Compose file makes it easier to: + - Change values across environments without editing YAML + - Avoid committing secrets to version control + - Reuse values across multiple services - For more information on how to write Dockerfiles, see the [Dockerfile reference](/reference/dockerfile/). +6. Create a `.dockerignore` file to keep unnecessary files out of your build context: + + ```text + .env + *.pyc + __pycache__ + redis-data + ``` -## Step 2: Define services in a Compose file + Docker sends everything in your project directory to the daemon when it builds an image. + Without `.dockerignore`, that includes your `.env` file (which may contain secrets) and + any cached Python bytecode. Excluding them keeps builds fast and avoids accidentally + baking sensitive values into an image layer. -Compose simplifies the control of your entire application stack, making it easy to manage services, networks, and volumes in a single, comprehensible YAML configuration file. +## Step 2: Define and start your services -Create a file called `compose.yaml` in your project directory and paste -the following: +Compose simplifies the control of your entire application stack, making it easy to manage services, networks, and volumes in a single YAML configuration file. -```yaml -services: - web: - build: . - ports: - - "8000:5000" - redis: - image: "redis:alpine" -``` +1. Create `compose.yaml` in your project directory and paste the following: -This Compose file defines two services: `web` and `redis`. + ```yaml + services: + web: + build: . + ports: + - "${APP_PORT}:5000" + environment: + - REDIS_HOST=${REDIS_HOST} + - REDIS_PORT=${REDIS_PORT} -The `web` service uses an image that's built from the `Dockerfile` in the current directory. -It then binds the container and the host machine to the exposed port, `8000`. This example service uses the default port for the Flask web server, `5000`. + redis: + image: redis:alpine + ``` -The `redis` service uses a public [Redis](https://registry.hub.docker.com/_/redis/) -image pulled from the Docker Hub registry. + This Compose file defines two services: -For more information on the `compose.yaml` file, see [How Compose works](compose-application-model.md). + - The `web` service uses an image that's built from the `Dockerfile` in the current directory. It maps port `8000` on the host to port `5000` on the container where Flask listens by default. -## Step 3: Build and run your app with Compose + - The `redis` service uses a public [Redis](https://registry.hub.docker.com/_/redis/) image pulled from the Docker Hub registry. -With a single command, you create and start all the services from your configuration file. + For more information on the `compose.yaml` file, see [How Compose works](compose-application-model.md). -1. From your project directory, start up your application by running `docker compose up`. +2. Start up your application: ```console $ docker compose up + ``` + + With a single command, you create and start all the services from your configuration file. Compose builds your web image, pulls the Redis image, and starts both containers. - Creating network "composetest_default" with the default driver - Creating composetest_web_1 ... - Creating composetest_redis_1 ... - Creating composetest_web_1 - Creating composetest_redis_1 ... 
done - Attaching to composetest_web_1, composetest_redis_1 - web_1 | * Running on http://0.0.0.0:5000/ (Press CTRL+C to quit) - redis_1 | 1:C 17 Aug 22:11:10.480 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo - redis_1 | 1:C 17 Aug 22:11:10.480 # Redis version=4.0.1, bits=64, commit=00000000, modified=0, pid=1, just started - redis_1 | 1:C 17 Aug 22:11:10.480 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf - web_1 | * Restarting with stat - redis_1 | 1:M 17 Aug 22:11:10.483 * Running mode=standalone, port=6379. - redis_1 | 1:M 17 Aug 22:11:10.483 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. - web_1 | * Debugger is active! - redis_1 | 1:M 17 Aug 22:11:10.483 # Server initialized - redis_1 | 1:M 17 Aug 22:11:10.483 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled. - web_1 | * Debugger PIN: 330-787-903 - redis_1 | 1:M 17 Aug 22:11:10.483 * Ready to accept connections +3. Open `http://localhost:8000`. You should see: + + ```text + Hello from Docker! I have been seen 1 time(s). ``` - Compose pulls a Redis image, builds an image for your code, and starts the - services you defined. In this case, the code is statically copied into the image at build time. + Refresh the page — the counter increments on each visit. -2. Enter `http://localhost:8000/` in a browser to see the application running. + This minimal setup works, but it has two problems you'll fix in the next steps: - If this doesn't resolve, you can also try `http://127.0.0.1:8000`. + - Startup race: `web` starts at the same time as `redis`. If Redis isn't ready yet, + the Flask app fails to connect and crashes. + - No persistence: If you run `docker compose down` followed by `docker compose up`, the + counter resets to zero. `docker compose down` removes the containers, and with them + any data written to the container's writable layer. `docker compose stop` preserves + the containers so data survives, but you can't rely on that in production where + containers are regularly replaced. - You should see a message in your browser saying: +4. Stop the stack before moving on: - ```text - Hello World! I have been seen 1 times. + ```console + $ docker compose down ``` - ![hello world in browser](images/quick-hello-world-1.png) +## Step 3: Fix the startup race with health checks -3. Refresh the page. +To fix the startup race, Compose needs to wait until `redis` is confirmed healthy before +starting `web`. - The number should increment. +1. Update `compose.yaml`: - ```text - Hello World! I have been seen 2 times. + ```yaml + services: + web: + build: . + ports: + - "${APP_PORT}:5000" + environment: + - REDIS_HOST=${REDIS_HOST} + - REDIS_PORT=${REDIS_PORT} + depends_on: + redis: + condition: service_healthy + + redis: + image: redis:alpine + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 5s + timeout: 3s + retries: 5 + start_period: 10s ``` - ![hello world in browser](images/quick-hello-world-2.png) + The `healthcheck` block tells Compose how to test whether Redis is ready: -4. 
Switch to another terminal window, and type `docker image ls` to list local images. + - `test` is the command Compose runs inside the container to check its health. + `redis-cli ping` connects to Redis and expects a `PONG` response — if it gets one, + the container is healthy. + - `start_period` gives Redis 10 seconds to initialize before health checks begin. + Any failures during this window don't count toward the retry limit. + - `interval` runs the check every 5 seconds after the start period has elapsed. + - `timeout` gives each check 3 seconds to respond before treating it as a failure. + - `retries` sets how many consecutive failures are allowed before Compose marks the + container as unhealthy. With `interval: 5s` and `retries: 5`, Compose will wait up + to 25 seconds before giving up. - Listing images at this point should return `redis` and `web`. +2. Start the stack to confirm the ordering is fixed: ```console - $ docker image ls - - REPOSITORY TAG IMAGE ID CREATED SIZE - composetest_web latest e2c21aa48cc1 4 minutes ago 93.8MB - python 3.4-alpine 84e6077c7ab6 7 days ago 82.5MB - redis alpine 9d8fa9aa0e5b 3 weeks ago 27.5MB + $ docker compose up ``` - You can inspect images with `docker inspect `. - -5. Stop the application, either by running `docker compose down` - from within your project directory in the second terminal, or by - hitting `CTRL+C` in the original terminal where you started the app. - -## Step 4: Edit the Compose file to use Compose Watch - -Edit the `compose.yaml` file in your project directory to use `watch` so you can preview your running Compose services which are automatically updated as you edit and save your code: - -```yaml -services: - web: - build: . - ports: - - "8000:5000" - develop: - watch: - - action: sync - path: . - target: /code - redis: - image: "redis:alpine" -``` + You should see something similar to: -Whenever a file is changed, Compose syncs the file to the corresponding location under `/code` inside the container. Once copied, the bundler updates the running application without a restart. + ```text + [+] Running 2/2 + ✔ Container compose-demo-redis-1 Healthy 0.0s + ``` -For more information on how Compose Watch works, see [Use Compose Watch](/manuals/compose/how-tos/file-watch.md). Alternatively, see [Manage data in containers](/manuals/engine/storage/volumes.md) for other options. +3. Open `http://localhost:8000` to confirm the app is still working, then stop the stack before moving on: -> [!NOTE] -> -> For this example to work, the `--debug` option is added to the `Dockerfile`. The `--debug` option in Flask enables automatic code reload, making it possible to work on the backend API without the need to restart or rebuild the container. -> After changing the `.py` file, subsequent API calls will use the new code, but the browser UI will not automatically refresh in this small example. Most frontend development servers include native live reload support that works with Compose. + ```console + $ docker compose down + ``` -## Step 5: Re-build and run the app with Compose +## Step 4: Enable Compose Watch for live updates -From your project directory, type `docker compose watch` or `docker compose up --watch` to build and launch the app and start the file watch mode. +Without Compose Watch, every code change requires you to stop the stack, rebuild the image, and restart the containers. Compose Watch eliminates that cycle by automatically syncing changes into your running container as you save files. 
-```console -$ docker compose watch -[+] Running 2/2 - ✔ Container docs-redis-1 Created 0.0s - ✔ Container docs-web-1 Recreated 0.1s -Attaching to redis-1, web-1 - ⦿ watch enabled -... -``` +1. Update `compose.yaml` to add the `develop.watch` block to the `web` service: + + ```yaml + services: + web: + build: . + ports: + - "${APP_PORT}:5000" + environment: + - REDIS_HOST=${REDIS_HOST} + - REDIS_PORT=${REDIS_PORT} + depends_on: + redis: + condition: service_healthy + develop: + watch: + - action: sync+restart + path: . + target: /code + - action: rebuild + path: requirements.txt -Check the `Hello World` message in a web browser again, and refresh to see the -count increment. + redis: + image: redis:alpine + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 5s + timeout: 3s + retries: 5 + start_period: 10s + ``` -## Step 6: Update the application + The `watch` block defines two rules: + - The `sync+restart` action watches your project directory (`.`) on the host. When a file changes, Compose copies any changed files into `/code` inside the running container, then restarts the container. Because the container restarts with the updated files already in place, Flask starts up reading the new code directly — no manual rebuild or restart needed. + - The `rebuild` action on `requirements.txt` triggers a full image rebuild whenever you add a new dependency, since installing packages requires rebuilding the image, not just syncing files. + +2. Start the stack with Watch enabled: -To see Compose Watch in action: + ```console + $ docker compose up --watch + ``` -1. Change the greeting in `app.py` and save it. For example, change the `Hello World!` -message to `Hello from Docker!`: +3. Make a live change. Open `app.py` and update the greeting: ```python - return f'Hello from Docker! I have been seen {count} times.\n' + return f"Hello from Compose Watch! I have been seen {count} time(s).\n" ``` -2. Refresh the app in your browser. The greeting should be updated, and the -counter should still be incrementing. +4. Save the file. Compose Watch detects the change and syncs it immediately: + + ```text + Syncing service "web" after changes were detected + ``` - ![hello world in browser](images/quick-hello-world-3.png) +5. Refresh `http://localhost:8000`. The updated greeting appears without any restart + and the counter should still be incrementing. -3. Once you're done, run `docker compose down`. +6. Stop the stack before moving on: -## Step 7: Split up your services + ```console + $ docker compose down + ``` -Using multiple Compose files lets you customize a Compose application for different environments or workflows. This is useful for large applications that may use dozens of containers, with ownership distributed across multiple teams. + For more information on how Compose Watch works, see [Use Compose Watch](/manuals/compose/how-tos/file-watch.md). -1. In your project folder, create a new Compose file called `infra.yaml`. +## Step 5: Persist data with named volumes -2. Cut the Redis service from your `compose.yaml` file and paste it into your new `infra.yaml` file. Make sure you add the `services` top-level attribute at the top of your file. Your `infra.yaml` file should now look like this: +Each time you stop and restart the stack the visit counter resets to zero. Redis data +lives inside the container, so it disappears when the container is removed. A named +volume fixes this by storing the data on the host, outside the container lifecycle. + +1. 
Update `compose.yaml`: ```yaml services: + web: + build: . + ports: + - "${APP_PORT}:5000" + environment: + - REDIS_HOST=${REDIS_HOST} + - REDIS_PORT=${REDIS_PORT} + depends_on: + redis: + condition: service_healthy + develop: + watch: + - action: sync+restart + path: . + target: /code + - action: rebuild + path: requirements.txt + redis: - image: "redis:alpine" + image: redis:alpine + volumes: + - redis-data:/data + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 5s + timeout: 3s + retries: 5 + start_period: 10s + + volumes: + redis-data: ``` -3. In your `compose.yaml` file, add the `include` top-level attribute along with the path to the `infra.yaml` file. + The `redis-data:/data` entry under `redis.volumes` mounts the named volume at `/data`, the path where Redis + writes its data files. The top-level `volumes` key registers it with Docker so it + persists between `compose down` and `compose up` cycles. + +2. Start the stack with `docker compose up --watch` and refresh `http://localhost:8000` a few times to build up a count. + +3. Tear down the stack with `docker compose down` and then bring it back up again with `docker compose up --watch`. + +4. Open `http://localhost:8000` — the counter continues from where it left off. + +5. Now reset the counter with `docker compose down -v`. + + The `-v` flag removes named volumes along with the containers. Use this intentionally — it permanently deletes the stored data. + +## Step 6: Structure your project with multiple Compose files + +As applications grow, a single `compose.yaml` becomes harder to maintain. The `include` +top-level element lets you split services across multiple files while keeping them part of the +same application. + +This is especially useful when different teams own different parts of the stack, or when +you want to reuse infrastructure definitions across projects. + +1. Create a new file in your project directory called `infra.yaml` and move the Redis service and volume into it: + + ```yaml + services: + redis: + image: redis:alpine + volumes: + - redis-data:/data + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 5s + timeout: 3s + retries: 5 + start_period: 10s + + volumes: + redis-data: + ``` + +2. Update `compose.yaml` to include `infra.yaml`: ```yaml include: - - infra.yaml + - path: ./infra.yaml services: web: build: . ports: - - "8000:5000" + - "${APP_PORT}:5000" + environment: + - REDIS_HOST=${REDIS_HOST} + - REDIS_PORT=${REDIS_PORT} + depends_on: + redis: + condition: service_healthy develop: watch: - - action: sync + - action: sync+restart path: . target: /code + - action: rebuild + path: requirements.txt ``` -4. Run `docker compose up` to build the app with the updated Compose files, and run it. You should see the `Hello world` message in your browser. +3. Run the application to confirm everything still works: + + ```console + $ docker compose up --watch + ``` -This is a simplified example, but it demonstrates the basic principle of `include` and how it can make it easier to modularize complex applications into sub-Compose files. For more information on `include` and working with multiple Compose files, see [Working with multiple Compose files](/manuals/compose/how-tos/multiple-compose-files/_index.md). + Compose merges both files at startup. The `web` service can still reference `redis` + by name because all included services share the same default network. 
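   If you want to double-check the merge without starting anything, list the services Compose resolves from both files:

   ```console
   $ docker compose config --services
   ```

   Both `web` and `redis` should appear in the output.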
-## Step 8: Experiment with some other commands + This is a simplified example, but it demonstrates the basic principle of `include` and how it can make it easier to modularize complex applications into sub-Compose files. For more information on `include` and working with multiple Compose files, see [Working with multiple Compose files](/manuals/compose/how-tos/multiple-compose-files/_index.md). -- If you want to run your services in the background, you can pass the `-d` flag (for "detached" mode) to `docker compose up` and use `docker compose ps` to see what is currently running: +4. Stop the stack before moving on: ```console - $ docker compose up -d + $ docker compose down + ``` - Starting composetest_redis_1... - Starting composetest_web_1... +## Step 7: Inspect and debug your running stack - $ docker compose ps +With a fully configured stack, you can observe what's happening inside your containers +without stopping anything. This step covers the core commands for inspecting the resolved configuration, streaming logs, and running commands +inside a running container. - Name Command State Ports - ------------------------------------------------------------------------------------- - composetest_redis_1 docker-entrypoint.sh redis ... Up 6379/tcp - composetest_web_1 flask run Up 0.0.0.0:8000->5000/tcp - ``` +Before starting the stack, verify that Compose has resolved your `.env` variables and +merged all files correctly: + +```console +$ docker compose config +``` -- Run `docker compose --help` to see other available commands. +`docker compose config` doesn't require the stack to be running — it works purely from +your files. A few things worth noting in the output: -- If you started Compose with `docker compose up -d`, stop your services once you've finished with them: +- `${APP_PORT}`, `${REDIS_HOST}`, and `${REDIS_PORT}` have all been replaced with the + values from your `.env` file. +- Short-form port notation (`"8000:5000"`) is expanded into its canonical fields + (`target`, `published`, `protocol`). +- The default network and volume names are made explicit, prefixed with the project name + `compose-demo`. +- The output is the fully resolved configuration, with any files + brought in via `include` merged into a single view. - ```console - $ docker compose stop - ``` +Use `docker compose config` any time you want to confirm what Compose will actually +apply, especially when debugging variable substitution or working with multiple Compose files. -- You can bring everything down, removing the containers entirely, with the `docker compose down` command. +Now start the stack in detached mode so the terminal stays free for the commands that +follow: + +```console +$ docker compose up -d +``` + +### Stream logs from all services + +```console +$ docker compose logs -f +``` + +The `-f` flag follows the log stream in real time, interleaving output from both +containers with color-coded service name prefixes. Refresh `http://localhost:8000` a +few times and watch the Flask request logs appear. To follow logs for a single service, +pass its name: + +```console +$ docker compose logs -f web +``` + +Press `Ctrl+C` to stop following logs. The containers keep running. + +### Run commands inside a running container + +`docker compose exec` runs a command inside an already-running container without +starting a new one. This is the primary tool for live debugging. 
+ +#### Verify environment variables are set correctly + +```console +$ docker compose exec web env | grep REDIS +``` + +```text +REDIS_HOST=redis +REDIS_PORT=6379 +``` + +#### Test that the `web` container can reach Redis using the service name as the hostname + +```console +$ docker compose exec web python -c "import redis; r = redis.Redis(host='redis'); print(r.ping())" +``` + +```text +True +``` + +This uses the same `redis` library your app uses, so a `True` response confirms that +service discovery, networking, and the Redis connection are all working end to end. + +#### Inspect the live value of the hit counter in Redis + +```console +$ docker compose exec redis redis-cli GET hits +``` ## Where to go next -- Try the [Sample apps with Compose](https://github.com/docker/awesome-compose) -- [Explore the full list of Compose commands](/reference/cli/docker/compose.md) +- [Explore the full list of Compose commands](/reference/cli/docker/compose/) - [Explore the Compose file reference](/reference/compose-file/_index.md) - [Check out the Learning Docker Compose video on LinkedIn Learning](https://www.linkedin.com/learning/learning-docker-compose/) +- [Learn how to set environment variables in Compose](/compose/how-tos/environment-variables/set-environment-variables/) +- [Learn how to package and distribute your Compose app](/compose/how-tos/oci-artifact/) + + diff --git a/content/manuals/compose/how-tos/dependent-images.md b/content/manuals/compose/how-tos/dependent-images.md index d62668548ac..7491f7dd11b 100644 --- a/content/manuals/compose/how-tos/dependent-images.md +++ b/content/manuals/compose/how-tos/dependent-images.md @@ -8,7 +8,7 @@ weight: 50 {{< summary-bar feature_name="Compose dependent images" >}} To reduce push/pull time and image weight, a common practice for Compose applications is to have services -share base layers as much as possible. You will typically select the same operating system base image for +share base layers as much as possible. You typically select the same operating system base image for all services. But you can also get one step further by sharing image layers when your images share the same system packages. The challenge to address is then to avoid repeating the exact same Dockerfile instruction in all services. @@ -162,3 +162,8 @@ Bake can also be selected as the default builder by editing your `$HOME/.docker/ ... } ``` + +## Additional resources + +- [Docker Compose build reference](/reference/cli/docker/compose/build/) +- [Learn about multi-stage Dockerfiles](/manuals/build/building/multi-stage.md) diff --git a/content/manuals/compose/how-tos/environment-variables/_index.md b/content/manuals/compose/how-tos/environment-variables/_index.md index a2ddb86929a..0775edc2665 100644 --- a/content/manuals/compose/how-tos/environment-variables/_index.md +++ b/content/manuals/compose/how-tos/environment-variables/_index.md @@ -2,14 +2,13 @@ title: Environment variables in Compose linkTitle: Use environment variables weight: 40 -description: Explainer on the ways to set, use and manage environment variables in - Compose +description: Explains how to set, use, and manage environment variables in Docker Compose. keywords: compose, orchestration, environment, env file aliases: - /compose/environment-variables/ --- -By leveraging environment variables and interpolation in Docker Compose, you can create versatile and reusable configurations, making your Dockerized applications easier to manage and deploy across different environments. 
+Environment variables and interpolation in Docker Compose help you create reusable, flexible configurations. This makes Dockerized applications easier to manage and deploy across environments. > [!TIP] > diff --git a/content/manuals/compose/how-tos/environment-variables/envvars-precedence.md b/content/manuals/compose/how-tos/environment-variables/envvars-precedence.md index f5e14549472..8197d8f18f7 100644 --- a/content/manuals/compose/how-tos/environment-variables/envvars-precedence.md +++ b/content/manuals/compose/how-tos/environment-variables/envvars-precedence.md @@ -12,7 +12,7 @@ aliases: When the same environment variable is set in multiple sources, Docker Compose follows a precedence rule to determine the value for that variable in your container's environment. -This page contains information on the level of precedence each method of setting environmental variables takes. +This page explains how Docker Compose determines the final value of an environment variable when it's defined in multiple locations. The order of precedence (highest to lowest) is as follows: 1. Set using [`docker compose run -e` in the CLI](set-environment-variables.md#set-environment-variables-with-docker-compose-run---env). @@ -59,25 +59,25 @@ The columns `Host OS environment` and `.env` file is listed only for illustratio Each row represents a combination of contexts where `VALUE` is set, substituted, or both. The **Result** column indicates the final value for `VALUE` in each scenario. -| # | `docker compose run` | `environment` attribute | `env_file` attribute | Image `ENV` | `Host OS` environment | `.env` file | | Result | -|:--:|:----------------:|:-------------------------------:|:----------------------:|:------------:|:-----------------------:|:-----------------:|:---:|:----------:| -| 1 | - | - | - | - | `VALUE=1.4` | `VALUE=1.3` || - | -| 2 | - | - | `VALUE=1.6` | `VALUE=1.5` | `VALUE=1.4` | - ||**`VALUE=1.6`** | -| 3 | - | `VALUE=1.7` | - | `VALUE=1.5` | `VALUE=1.4` | - ||**`VALUE=1.7`** | -| 4 | - | - | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.5`** | -| 5 |`--env VALUE=1.8` | - | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.8`** | -| 6 |`--env VALUE` | - | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.4`** | -| 7 |`--env VALUE` | - | - | `VALUE=1.5` | - | `VALUE=1.3` ||**`VALUE=1.3`** | -| 8 | - | - | `VALUE` | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.4`** | -| 9 | - | - | `VALUE` | `VALUE=1.5` | - | `VALUE=1.3` ||**`VALUE=1.3`** | -| 10 | - | `VALUE` | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.4`** | -| 11 | - | `VALUE` | - | `VALUE=1.5` | - | `VALUE=1.3` ||**`VALUE=1.3`** | -| 12 |`--env VALUE` | `VALUE=1.7` | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.4`** | -| 13 |`--env VALUE=1.8` | `VALUE=1.7` | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.8`** | -| 14 |`--env VALUE=1.8` | - | `VALUE=1.6` | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.8`** | -| 15 |`--env VALUE=1.8` | `VALUE=1.7` | `VALUE=1.6` | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.8`** | - -### Result explanation +| # | `docker compose run` | `environment` attribute | `env_file` attribute | Image `ENV` | `Host OS` environment | `.env` file | Result | +|:--:|:----------------:|:-------------------------------:|:----------------------:|:------------:|:-----------------------:|:-----------------:|:----------:| +| 1 | - | - | - | - | `VALUE=1.4` | `VALUE=1.3` | - | +| 2 | - | - | `VALUE=1.6` | `VALUE=1.5` | `VALUE=1.4` | - 
|**`VALUE=1.6`** | +| 3 | - | `VALUE=1.7` | - | `VALUE=1.5` | `VALUE=1.4` | - |**`VALUE=1.7`** | +| 4 | - | - | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.5`** | +| 5 |`--env VALUE=1.8` | - | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.8`** | +| 6 |`--env VALUE` | - | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.4`** | +| 7 |`--env VALUE` | - | - | `VALUE=1.5` | - | `VALUE=1.3` |**`VALUE=1.3`** | +| 8 | - | - | `VALUE` | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.4`** | +| 9 | - | - | `VALUE` | `VALUE=1.5` | - | `VALUE=1.3` |**`VALUE=1.3`** | +| 10 | - | `VALUE` | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.4`** | +| 11 | - | `VALUE` | - | `VALUE=1.5` | - | `VALUE=1.3` |**`VALUE=1.3`** | +| 12 |`--env VALUE` | `VALUE=1.7` | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.4`** | +| 13 |`--env VALUE=1.8` | `VALUE=1.7` | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.8`** | +| 14 |`--env VALUE=1.8` | - | `VALUE=1.6` | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.8`** | +| 15 |`--env VALUE=1.8` | `VALUE=1.7` | `VALUE=1.6` | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.8`** | + +### Understanding precedence results Result 1: The local environment takes precedence, but the Compose file is not set to replicate this inside the container, so no such variable is set. @@ -87,7 +87,7 @@ Result 3: The `environment` attribute in the Compose file defines an explicit va Result 4: The image's `ENV` directive declares the variable `VALUE`, and since the Compose file is not set to override this value, this variable is defined by image -Result 5: The `docker compose run` command has the `--env` flag set which an explicit value, and overrides the value set by the image. +Result 5: The `docker compose run` command has the `--env` flag set with an explicit value, and overrides the value set by the image. Result 6: The `docker compose run` command has the `--env` flag set to replicate the value from the environment. Host OS value takes precedence and is replicated into the container's environment. @@ -104,3 +104,8 @@ Result 11: The `environment` attribute in the Compose file is set to replicate ` Result 12: The `--env` flag has higher precedence than the `environment` and `env_file` attributes and is to set to replicate `VALUE` from the local environment. Host OS value takes precedence and is replicated into the container's environment. Results 13 to 15: The `--env` flag has higher precedence than the `environment` and `env_file` attributes and so sets the value. 
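To see the top of the precedence order in practice (row 5 in the table), you could run something like the following against a hypothetical `web` service whose image sets `VALUE=1.5`:

```console
$ docker compose run --rm -e VALUE=1.8 web env | grep VALUE
VALUE=1.8
```

The explicit `-e` flag wins over the image `ENV`, the host environment, and the `.env` file.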
+ +## Next steps + +- [Set environment variables in Compose](set-environment-variables.md) +- [Use variable interpolation in Compose files](variable-interpolation.md) diff --git a/content/manuals/compose/how-tos/environment-variables/envvars.md b/content/manuals/compose/how-tos/environment-variables/envvars.md index 67d5929673f..e2ad859dd73 100644 --- a/content/manuals/compose/how-tos/environment-variables/envvars.md +++ b/content/manuals/compose/how-tos/environment-variables/envvars.md @@ -1,7 +1,7 @@ --- description: Compose pre-defined environment variables -keywords: fig, composition, compose, docker, orchestration, cli, reference -title: Set or change pre-defined environment variables in Docker Compose +keywords: fig, composition, compose, docker, orchestration, cli, reference, compose environment configuration, docker env variables +title: Configure pre-defined environment variables in Docker Compose linkTitle: Pre-defined environment variables weight: 30 aliases: @@ -9,9 +9,9 @@ aliases: - /compose/environment-variables/envvars/ --- -Compose already comes with pre-defined environment variables. It also inherits common Docker CLI environment variables, such as `DOCKER_HOST` and `DOCKER_CONTEXT`. See [Docker CLI environment variable reference](/reference/cli/docker/#environment-variables) for details. +Docker Compose includes several pre-defined environment variables. It also inherits common Docker CLI environment variables, such as `DOCKER_HOST` and `DOCKER_CONTEXT`. See [Docker CLI environment variable reference](/reference/cli/docker/#environment-variables) for details. -This page contains information on how you can set or change the following pre-defined environment variables if you need to: +This page explains how to set or change the following pre-defined environment variables: - `COMPOSE_PROJECT_NAME` - `COMPOSE_FILE` @@ -24,21 +24,26 @@ This page contains information on how you can set or change the following pre-de - `COMPOSE_ANSI` - `COMPOSE_STATUS_STDOUT` - `COMPOSE_ENV_FILES` +- `COMPOSE_DISABLE_ENV_FILE` - `COMPOSE_MENU` - `COMPOSE_EXPERIMENTAL` +- `COMPOSE_PROGRESS` ## Methods to override -You can set or change the pre-defined environment variables: -- With an [`.env` file located in your working directory](/manuals/compose/how-tos/environment-variables/variable-interpolation.md) -- From the command line -- From your [shell](variable-interpolation.md#substitute-from-the-shell) +| Method | Description | +| ----------- | -------------------------------------------- | +| [`.env` file](/manuals/compose/how-tos/environment-variables/variable-interpolation.md) | Located in the working directory. | +| [Shell](variable-interpolation.md#substitute-from-the-shell) | Defined in the host operating system shell. | +| CLI | Passed with `--env` or `-e` flag at runtime. | When changing or setting any environment variables, be aware of [Environment variable precedence](envvars-precedence.md). -## Configure +## Configuration details -### COMPOSE\_PROJECT\_NAME +### Project and file configuration + +#### COMPOSE\_PROJECT\_NAME Sets the project name. This value is prepended along with the service name to the container's name on startup. @@ -61,9 +66,9 @@ underscores, and must begin with a lowercase letter or decimal digit. If the `basename` of the project directory or current directory violates this constraint, you must use one of the other mechanisms. 
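For example, to start the project under an explicit name from the shell (an `.env` file entry works the same way):

```console
$ COMPOSE_PROJECT_NAME=myapp docker compose up -d
```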
-See also the [command-line options overview](/reference/cli/docker/compose/_index.md#command-options-overview-and-help) and [using `-p` to specify a project name](/reference/cli/docker/compose/_index.md#use--p-to-specify-a-project-name). +See also [using `-p` to specify a project name](/reference/cli/docker/compose/#use--p-to-specify-a-project-name). -### COMPOSE\_FILE +#### COMPOSE\_FILE Specifies the path to a Compose file. Specifying multiple Compose files is supported. @@ -78,9 +83,9 @@ Specifies the path to a Compose file. Specifying multiple Compose files is suppo ``` The path separator can also be customized using [`COMPOSE_PATH_SEPARATOR`](#compose_path_separator). -See also the [command-line options overview](/reference/cli/docker/compose/_index.md#command-options-overview-and-help) and [using `-f` to specify name and path of one or more Compose files](/reference/cli/docker/compose/_index.md#use--f-to-specify-the-name-and-path-of-one-or-more-compose-files). +See also [using `-f` to specify name and path of one or more Compose files](/reference/cli/docker/compose/#use--f-to-specify-the-name-and-path-of-one-or-more-compose-files). -### COMPOSE\_PROFILES +#### COMPOSE\_PROFILES Specifies one or more profiles to be enabled when `docker compose up` is run. @@ -97,26 +102,49 @@ The following example enables all services matching both the `frontend` and `deb COMPOSE_PROFILES=frontend,debug ``` -See also [Using profiles with Compose](../profiles.md) and the [`--profile` command-line option](/reference/cli/docker/compose/_index.md#use-profiles-to-enable-optional-services). +See also [Using profiles with Compose](../profiles.md) and the [`--profile` command-line option](/reference/cli/docker/compose/#use-profiles-to-enable-optional-services). -### COMPOSE\_CONVERT\_WINDOWS\_PATHS +#### COMPOSE\_PATH\_SEPARATOR -When enabled, Compose performs path conversion from Windows-style to Unix-style in volume definitions. +Specifies a different path separator for items listed in `COMPOSE_FILE`. + +- Defaults to: + - On macOS and Linux to `:` + - On Windows to`;` + +#### COMPOSE\_ENV\_FILES + +Specifies which environment files Compose should use if `--env-file` isn't used. + +When using multiple environment files, use a comma as a separator. For example: + +```console +COMPOSE_ENV_FILES=.env.envfile1,.env.envfile2 +``` + +If `COMPOSE_ENV_FILES` is not set, and you don't provide `--env-file` in the CLI, Docker Compose uses the default behavior, which is to look for an `.env` file in the project directory. + +#### COMPOSE\_DISABLE\_ENV\_FILE + +Lets you disable the use of the default `.env` file. - Supported values: - - `true` or `1`, to enable - - `false` or `0`, to disable + - `true` or `1`, Compose ignores the `.env` file + - `false` or `0`, Compose looks for an `.env` file in the project directory - Defaults to: `0` -### COMPOSE\_PATH\_SEPARATOR +### Environment handling and container lifecycle -Specifies a different path separator for items listed in `COMPOSE_FILE`. +#### COMPOSE\_CONVERT\_WINDOWS\_PATHS -- Defaults to: - - On macOS and Linux to `:` - - On Windows to`;` +When enabled, Compose performs path conversion from Windows-style to Unix-style in volume definitions. + +- Supported values: + - `true` or `1`, to enable + - `false` or `0`, to disable +- Defaults to: `0` -### COMPOSE\_IGNORE\_ORPHANS +#### COMPOSE\_IGNORE\_ORPHANS When enabled, Compose doesn't try to detect orphaned containers for the project. 
@@ -125,7 +153,7 @@ When enabled, Compose doesn't try to detect orphaned containers for the project. - `false` or `0`, to disable - Defaults to: `0` -### COMPOSE\_REMOVE\_ORPHANS +#### COMPOSE\_REMOVE\_ORPHANS When enabled, Compose automatically removes orphaned containers when updating a service or stack. Orphaned containers are those that were created by a previous configuration but are no longer defined in the current `compose.yaml` file. @@ -134,11 +162,13 @@ When enabled, Compose automatically removes orphaned containers when updating a - `false` or `0`, to disable automatic removal. Compose displays a warning about orphaned containers instead. - Defaults to: `0` -### COMPOSE\_PARALLEL\_LIMIT +#### COMPOSE\_PARALLEL\_LIMIT Specifies the maximum level of parallelism for concurrent engine calls. -### COMPOSE\_ANSI +### Output + +#### COMPOSE\_ANSI Specifies when to print ANSI control characters. @@ -148,7 +178,7 @@ Specifies when to print ANSI control characters. - `always` or `0`, use TTY mode - Defaults to: `auto` -### COMPOSE\_STATUS\_STDOUT +#### COMPOSE\_STATUS\_STDOUT When enabled, Compose writes its internal status and progress messages to `stdout` instead of `stderr`. The default value is false to clearly separate the output streams between Compose messages and your container's logs. @@ -158,34 +188,33 @@ The default value is false to clearly separate the output streams between Compos - `false` or `0`, to disable - Defaults to: `0` -### COMPOSE\_ENV\_FILES +#### COMPOSE\_PROGRESS -Lets you specify which environment files Compose should use if `--env-file` isn't used. +{{< summary-bar feature_name="Compose progress" >}} -When using multiple environment files, use a comma as a separator. For example: +Defines the type of progress output, if `--progress` isn't used. -```console -COMPOSE_ENV_FILES=.env.envfile1, .env.envfile2 -``` +Supported values are `auto`, `tty`, `plain`, `json`, and `quiet`. +Default is `auto`. -If `COMPOSE_ENV_FILES` is not set, and you don't provide `--env-file` in the CLI, Docker Compose uses the default behavior, which is to look for an `.env` file in the project directory. +### User experience -### COMPOSE\_MENU +#### COMPOSE\_MENU {{< summary-bar feature_name="Compose menu" >}} -When enabled, Compose displays a navigation menu where you can choose to open the Compose stack in Docker Desktop, switch on [`watch` mode](../file-watch.md), or use [Docker Debug](/reference/cli/docker/debug.md). +When enabled, Compose displays a navigation menu where you can choose to open the Compose stack in Docker Desktop, switch on [`watch` mode](../file-watch.md), or use [Docker Debug](/reference/cli/docker/debug/). - Supported values: - `true` or `1`, to enable - `false` or `0`, to disable - Defaults to: `1` if you obtained Docker Compose through Docker Desktop, otherwise the default is `0` -### COMPOSE\_EXPERIMENTAL +#### COMPOSE\_EXPERIMENTAL {{< summary-bar feature_name="Compose experimental" >}} -This is an opt-out variable. When turned off it deactivates the experimental features such as the navigation menu or [Synchronized file shares](/manuals/desktop/features/synchronized-file-sharing.md). +This is an opt-out variable. When turned off it deactivates the experimental features. - Supported values: - `true` or `1`, to enable @@ -195,7 +224,6 @@ This is an opt-out variable. When turned off it deactivates the experimental fea ## Unsupported in Compose V2 The following environment variables have no effect in Compose V2. 
-For more information, see [Migrate to Compose V2](/manuals/compose/releases/migrate.md). - `COMPOSE_API_VERSION` By default the API version is negotiated with the server. Use `DOCKER_API_VERSION`. @@ -206,3 +234,4 @@ For more information, see [Migrate to Compose V2](/manuals/compose/releases/migr - `COMPOSE_INTERACTIVE_NO_CLI` - `COMPOSE_DOCKER_CLI_BUILD` Use `DOCKER_BUILDKIT` to select between BuildKit and the classic builder. If `DOCKER_BUILDKIT=0` then `docker compose build` uses the classic builder to build images. + diff --git a/content/manuals/compose/how-tos/environment-variables/variable-interpolation.md b/content/manuals/compose/how-tos/environment-variables/variable-interpolation.md index bc2461c78ed..74140869546 100644 --- a/content/manuals/compose/how-tos/environment-variables/variable-interpolation.md +++ b/content/manuals/compose/how-tos/environment-variables/variable-interpolation.md @@ -12,9 +12,9 @@ aliases: A Compose file can use variables to offer more flexibility. If you want to quickly switch between image tags to test multiple versions, or want to adjust a volume source to your local -environment, you don't need to edit the Compose file each time, you can just set variables that insert values into your Compose file at run time. +environment, you don't need to edit the Compose file each time, you can just set variables that insert values into your Compose file at runtime. -Interpolation can also be used to insert values into your Compose file at run time, which is then used to pass variables into your container's environment +Interpolation can also be used to insert values into your Compose file at runtime, which is then used to pass variables into your container's environment Below is a simple example: @@ -28,7 +28,7 @@ services: ``` When you run `docker compose up`, the `web` service defined in the Compose file [interpolates](variable-interpolation.md) in the image `webapp:v1.5` which was set in the `.env` file. You can verify this with the -[config command](/reference/cli/docker/compose/config.md), which prints your resolved application config to the terminal: +[config command](/reference/cli/docker/compose/config/), which prints your resolved application config to the terminal: ```console $ docker compose config @@ -130,9 +130,13 @@ The following syntax rules apply to environment files: - Blank lines are ignored. - Unquoted and double-quoted (`"`) values have interpolation applied. - Each line represents a key-value pair. Values can optionally be quoted. +- Delimiter separating key and value can be either `=` or `:`. +- Spaces before and after value are ignored. - `VAR=VAL` -> `VAL` - `VAR="VAL"` -> `VAL` - `VAR='VAL'` -> `VAL` + - `VAR: VAL` -> `VAL` + - `VAR = VAL ` -> `VAL` - Inline comments for unquoted values must be preceded with a space. - `VAR=VAL # comment` -> `VAL` - `VAR=VAL# not a comment` -> `VAL# not a comment` @@ -149,6 +153,21 @@ The following syntax rules apply to environment files: - `VAR="some\tvalue"` -> `some value` - `VAR='some\tvalue'` -> `some\tvalue` - `VAR=some\tvalue` -> `some\tvalue` +- Single-quoted values can span multiple lines. 
Example: + + ```yaml + KEY='SOME + VALUE' + ``` + + If you then run `docker compose config`, you'll see: + + ```yaml + environment: + KEY: |- + SOME + VALUE + ``` ### Substitute with `--env-file` diff --git a/content/manuals/compose/how-tos/file-watch.md b/content/manuals/compose/how-tos/file-watch.md index e59386d267e..624df4f8767 100644 --- a/content/manuals/compose/how-tos/file-watch.md +++ b/content/manuals/compose/how-tos/file-watch.md @@ -118,6 +118,10 @@ For `path: ./app/html` and a change to `./app/html/index.html`: The `ignore` patterns are relative to the `path` defined in the current `watch` action, not to the project directory. In the following Example 1, the ignore path would be relative to the `./web` directory specified in the `path` attribute. +### `initial_sync` + +When using a `sync+x` action, the `initial_sync` attribute tells Compose to ensure files that are part of the defined `path` are up to date before starting a new watch session. + ## Example 1 This minimal example targets a Node.js application with the following structure: @@ -142,6 +146,7 @@ services: - action: sync path: ./web target: /src/web + initial_sync: true ignore: - node_modules/ - action: rebuild @@ -207,10 +212,6 @@ This setup demonstrates how to use the `sync+restart` action in Docker Compose t > or [local setup for Docker docs](https://github.com/docker/docs/blob/main/CONTRIBUTING.md) > for a demonstration of Compose `watch`. -## Feedback - -We are actively looking for feedback on this feature. Give feedback or report any bugs you may find in the [Compose Specification repository](https://github.com/compose-spec/compose-spec/pull/253). - ## Reference - [Compose Develop Specification](/reference/compose-file/develop.md) diff --git a/content/manuals/compose/how-tos/gpu-support.md b/content/manuals/compose/how-tos/gpu-support.md index 0d5c6b7b4d0..eeb7919c416 100644 --- a/content/manuals/compose/how-tos/gpu-support.md +++ b/content/manuals/compose/how-tos/gpu-support.md @@ -1,7 +1,7 @@ --- -description: Understand GPU support in Docker Compose +description: Learn how to configure Docker Compose to use NVIDIA GPUs with CUDA-based containers keywords: documentation, docs, docker, compose, GPU access, NVIDIA, samples -title: Enable GPU access with Docker Compose +title: Run Docker Compose services with GPU access linkTitle: Enable GPU support weight: 90 aliases: @@ -11,7 +11,6 @@ aliases: Compose services can define GPU device reservations if the Docker host contains such devices and the Docker Daemon is set accordingly. For this, make sure you install the [prerequisites](/manuals/engine/containers/resource_constraints.md#gpu) if you haven't already done so. The examples in the following sections focus specifically on providing service containers access to GPU devices with Docker Compose. -You can use either `docker-compose` or `docker compose` commands. For more information, see [Migrate to Compose V2](/manuals/compose/releases/migrate.md). ## Enabling GPU access to service containers @@ -19,16 +18,18 @@ GPUs are referenced in a `compose.yaml` file using the [device](/reference/compo This provides more granular control over a GPU reservation as custom values can be set for the following device properties: -- `capabilities`. This value specifies as a list of strings (eg. `capabilities: [gpu]`). You must set this field in the Compose file. Otherwise, it returns an error on service deployment. -- `count`. 
This value, specified as an integer or the value `all`, represents the number of GPU devices that should be reserved (providing the host holds that number of GPUs). If `count` is set to `all` or not specified, all GPUs available on the host are used by default. +- `capabilities`. This value is specified as a list of strings. For example, `capabilities: [gpu]`. You must set this field in the Compose file. Otherwise, it returns an error on service deployment. +- `count`. Specified as an integer or the value `all`, represents the number of GPU devices that should be reserved (providing the host holds that number of GPUs). If `count` is set to `all` or not specified, all GPUs available on the host are used by default. - `device_ids`. This value, specified as a list of strings, represents GPU device IDs from the host. You can find the device ID in the output of `nvidia-smi` on the host. If no `device_ids` are set, all GPUs available on the host are used by default. -- `driver`. This value is specified as a string, for example `driver: 'nvidia'` +- `driver`. Specified as a string, for example `driver: 'nvidia'` - `options`. Key-value pairs representing driver specific options. > [!IMPORTANT] > > You must set the `capabilities` field. Otherwise, it returns an error on service deployment. + +> [!NOTE] > > `count` and `device_ids` are mutually exclusive. You must only define one field at a time. @@ -39,7 +40,7 @@ For more information on these properties, see the [Compose Deploy Specification] ```yaml services: test: - image: nvidia/cuda:12.3.1-base-ubuntu20.04 + image: nvidia/cuda:12.9.0-base-ubuntu22.04 command: nvidia-smi deploy: resources: diff --git a/content/manuals/compose/how-tos/lifecycle.md b/content/manuals/compose/how-tos/lifecycle.md index d60a942d969..2903480e7e3 100644 --- a/content/manuals/compose/how-tos/lifecycle.md +++ b/content/manuals/compose/how-tos/lifecycle.md @@ -2,8 +2,8 @@ title: Using lifecycle hooks with Compose linkTitle: Use lifecycle hooks weight: 20 -desription: How to use lifecycle hooks with Docker Compose -keywords: cli, compose, lifecycle, hooks reference +description: Learn how to use Docker Compose lifecycle hooks like post_start and pre_stop to customize container behavior. +keywords: docker compose lifecycle hooks, post_start, pre_stop, docker compose entrypoint, docker container stop hooks, compose hook commands --- {{< summary-bar feature_name="Compose lifecycle hooks" >}} @@ -42,7 +42,7 @@ services: volumes: - data:/data post_start: - - command: chown -R /data 1001:1001 + - command: chown -R 1001:1001 /data user: root volumes: diff --git a/content/manuals/compose/how-tos/model-runner.md b/content/manuals/compose/how-tos/model-runner.md deleted file mode 100644 index 2a7fca43ca8..00000000000 --- a/content/manuals/compose/how-tos/model-runner.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Use Docker Model Runner -description: Learn how to integrate Docker Model Runner with Docker Compose to build AI-powered applications -keywords: compose, docker compose, model runner, ai, llm, artificial intelligence, machine learning -weight: 111 -params: - sidebar: - badge: - color: green - text: New ---- - -{{< summary-bar feature_name="Compose model runner" >}} - -Docker Model Runner can be integrated with Docker Compose to run AI models as part of your multi-container applications. -This lets you define and run AI-powered applications alongside your other services. 
- -## Prerequisites - -- Docker Compose v2.35 or later -- Docker Desktop 4.41 or later -- Docker Desktop for Mac with Apple Silicon or Docker Desktop for Windows with NVIDIA GPU -- [Docker Model Runner enabled in Docker Desktop](/manuals/ai/model-runner.md#enable-docker-model-runner) - -## Provider services - -Compose introduces a new service type called `provider` that allows you to declare platform capabilities required by your application. For AI models, you can use the `model` type to declare model dependencies. - -Here's an example of how to define a model provider: - -```yaml -services: - chat: - image: my-chat-app - depends_on: - - ai_runner - - ai_runner: - provider: - type: model - options: - model: ai/smollm2 -``` - -Notice the dedicated `provider` attribute in the `ai_runner` service. -This attribute specifies that the service is a model provider and lets you define options such as the name of the model to be used. - -There is also a `depends_on` attribute in the `chat` service. -This attribute specifies that the `chat` service depends on the `ai_runner` service. -This means that the `ai_runner` service will be started before the `chat` service to allow injection of model information to the `chat` service. - -## How it works - -During the `docker compose up` process, Docker Model Runner automatically pulls and runs the specified model. -It also sends Compose the model tag name and the URL to access the model runner. - -This information is then passed to services which declare a dependency on the model provider. -In the example above, the `chat` service receives 2 environment variables prefixed by the service name: - - `AI_RUNNER_URL` with the URL to access the model runner - - `AI_RUNNER_MODEL` with the model name which could be passed with the URL to request the model. - -This lets the `chat` service to interact with the model and use it for its own purposes. - -## Reference - -- [Docker Model Runner documentation](/manuals/ai/model-runner.md) diff --git a/content/manuals/compose/how-tos/multiple-compose-files/extends.md b/content/manuals/compose/how-tos/multiple-compose-files/extends.md index 2ba1bb55b99..ac8001af9a6 100644 --- a/content/manuals/compose/how-tos/multiple-compose-files/extends.md +++ b/content/manuals/compose/how-tos/multiple-compose-files/extends.md @@ -1,7 +1,6 @@ --- -description: How to use Docker Compose's extends keyword to share configuration between - files and projects -keywords: fig, composition, compose, docker, orchestration, documentation, docs +description: Learn how to reuse service configurations across files and projects using Docker Compose’s extends attribute. +keywords: fig, composition, compose, docker, orchestration, documentation, docs, compose file modularization title: Extend your Compose file linkTitle: Extend weight: 20 @@ -29,7 +28,7 @@ configuration. Tracking which fragment of a service is relative to which path is difficult and confusing, so to keep paths easier to understand, all paths must be defined relative to the base file. -## How it works +## How the `extends` attribute works ### Extending services from another file @@ -43,7 +42,7 @@ services: service: webapp ``` -This instructs Compose to re-use only the properties of the `webapp` service +This instructs Compose to reuse only the properties of the `webapp` service defined in the `common-services.yml` file. The `webapp` service itself is not part of the final project. 
If `common-services.yml` @@ -62,12 +61,12 @@ You get exactly the same result as if you wrote `compose.yaml` with the same `build`, `ports`, and `volumes` configuration values defined directly under `web`. -To include the service `webapp` in the final project when extending services from another file, you need to explicitly include both services in your current Compose file. For example (note this is a non-normative example): +To include the service `webapp` in the final project when extending services from another file, you need to explicitly include both services in your current Compose file. For example (this is for illustrative purposes only): ```yaml services: web: - build: alpine + build: ./alpine command: echo extends: file: common-services.yml @@ -87,7 +86,7 @@ If you define services in the same Compose file and extend one service from anot ```yaml services: web: - build: alpine + build: ./alpine extends: webapp webapp: environment: @@ -158,20 +157,6 @@ services: - queue ``` -## Exceptions and limitations - -`volumes_from` and `depends_on` are never shared between services using -`extends`. These exceptions exist to avoid implicit dependencies; you always -define `volumes_from` locally. This ensures dependencies between services are -clearly visible when reading the current file. Defining these locally also -ensures that changes to the referenced file don't break anything. - -`extends` is useful if you only need a single service to be shared and you are -familiar with the file you're extending to, so you can tweak the -configuration. But this isn’t an acceptable solution when you want to re-use -someone else's unfamiliar configurations and you don’t know about its own -dependencies. - ## Relative paths When using `extends` with a `file` attribute which points to another folder, relative paths diff --git a/content/manuals/compose/how-tos/multiple-compose-files/include.md b/content/manuals/compose/how-tos/multiple-compose-files/include.md index a07f0b98913..db6139af59f 100644 --- a/content/manuals/compose/how-tos/multiple-compose-files/include.md +++ b/content/manuals/compose/how-tos/multiple-compose-files/include.md @@ -18,7 +18,7 @@ Once the included Compose application loads, all resources are copied into the c > [!NOTE] > -> `include` applies recursively so an included Compose file which declares its own `include` section, results in those other files being included as well. +> `include` applies recursively so an included Compose file which declares its own `include` section, causes those files to also be included. ## Example @@ -36,11 +36,24 @@ services: This means the team managing `serviceB` can refactor its own database component to introduce additional services without impacting any dependent teams. It also means that the dependent teams don't need to include additional flags on each Compose command they run. -## Include and overrides +```yaml +include: + - oci://docker.io/username/my-compose-app:latest # use a Compose file stored as an OCI artifact +services: + serviceA: + build: . + depends_on: + - serviceB +``` +`include` allows you to reference Compose files from remote sources, such as OCI artifacts or Git repositories. +Here `serviceB` is defined in a Compose file stored on Docker Hub. + +## Using overrides with included Compose files Compose reports an error if any resource from `include` conflicts with resources from the included Compose file. This rule prevents -unexpected conflicts with resources defined by the included compose file author. 
However, there may be some circumstances where you might want to tweak the +unexpected conflicts with resources defined by the included compose file author. However, there may be some circumstances where you might want to customize the included model. This can be achieved by adding an override file to the include directive: + ```yaml include: - path : @@ -49,7 +62,7 @@ include: ``` The main limitation with this approach is that you need to maintain a dedicated override file per include. For complex projects with multiple -includes this would result into many Compose files. +includes this would result in many Compose files. The other option is to use a `compose.override.yaml` file. While conflicts will be rejected from the file using `include` when same resource is declared, a global Compose override file can override the resulting merged model, as demonstrated in following example: diff --git a/content/manuals/compose/how-tos/multiple-compose-files/merge.md b/content/manuals/compose/how-tos/multiple-compose-files/merge.md index 360d73d2475..d0f1fd889bb 100644 --- a/content/manuals/compose/how-tos/multiple-compose-files/merge.md +++ b/content/manuals/compose/how-tos/multiple-compose-files/merge.md @@ -246,14 +246,14 @@ For more merging rules, see [Merge and override](/reference/compose-file/merge.m - You can use the `-f` flag to specify a path to a Compose file that is not located in the current directory, either from the command line or by setting up a [COMPOSE_FILE environment variable](../environment-variables/envvars.md#compose_file) in your shell or in an environment file. - For example, if you are running the [Compose Rails sample](https://github.com/docker/awesome-compose/tree/master/official-documentation-samples/rails/README.md), and have a `compose.yaml` file in a directory called `sandbox/rails`. You can use a command like [docker compose pull](/reference/cli/docker/compose/pull.md) to get the postgres image for the `db` service from anywhere by using the `-f` flag as follows: `docker compose -f ~/sandbox/rails/compose.yaml pull db` + For example, if you are running the [Compose Rails sample](https://github.com/docker/awesome-compose/tree/master/official-documentation-samples/rails/README.md), and have a `compose.yaml` file in a directory called `sandbox/rails`. You can use a command like [docker compose pull](/reference/cli/docker/compose/pull/) to get the postgres image for the `db` service from anywhere by using the `-f` flag as follows: `docker compose -f ~/sandbox/rails/compose.yaml pull db` Here's the full example: ```console $ docker compose -f ~/sandbox/rails/compose.yaml pull db - Pulling db (postgres:latest)... - latest: Pulling from library/postgres + Pulling db (postgres:18)... 
+ 18: Pulling from library/postgres ef0380f84d05: Pull complete 50cf91dc1db8: Pull complete d3add4cd115c: Pull complete @@ -268,7 +268,7 @@ For more merging rules, see [Merge and override](/reference/compose-file/merge.m dcca70822752: Pull complete cecf11b8ccf3: Pull complete Digest: sha256:1364924c753d5ff7e2260cd34dc4ba05ebd40ee8193391220be0f9901d4e1651 - Status: Downloaded newer image for postgres:latest + Status: Downloaded newer image for postgres:18 ``` ## Example @@ -292,7 +292,7 @@ services: - cache db: - image: postgres:latest + image: postgres:18 cache: image: redis:latest diff --git a/content/manuals/compose/how-tos/networking.md b/content/manuals/compose/how-tos/networking.md index bea3c4004d7..8e86f278a4a 100644 --- a/content/manuals/compose/how-tos/networking.md +++ b/content/manuals/compose/how-tos/networking.md @@ -5,22 +5,16 @@ title: Networking in Compose linkTitle: Networking weight: 70 aliases: -- /compose/networking/ + - /compose/networking/ --- -{{% include "compose-eol.md" %}} +Compose handles networking for you by default, but gives you fine-grained control when you need it. This page explains how the default network works and how containers discover each other by name. It also covers when and how to define custom networks, connect services across separate Compose projects, map custom hostnames, and debug connectivity issues. -By default Compose sets up a single -[network](/reference/cli/docker/network/create.md) for your app. Each -container for a service joins the default network and is both reachable by -other containers on that network, and discoverable by the service's name. +## Default network and service discovery -> [!NOTE] -> -> Your app's network is given a name based on the "project name", -> which is based on the name of the directory it lives in. You can override the -> project name with either the [`--project-name` flag](/reference/cli/docker/compose.md) -> or the [`COMPOSE_PROJECT_NAME` environment variable](environment-variables/envvars.md#compose_project_name). +By default, Compose sets up a single [network](/reference/cli/docker/network/create/) for your app. Each container for a service joins the default network and is both reachable by other containers on that network, and discoverable by its service name. This network uses the `bridge` driver. To understand when you'd use a different driver, see [Network drivers: bridge vs host](#change-the-network-mode). + +For most development setups, the default network is sufficient. When you run `docker compose up`, Compose creates a network named `_default` and attaches all services to it. Each service registers its name with an internal DNS server, so containers can reach each other using the service name directly. No IP addresses or manual configuration is needed. For example, suppose your app is in a directory called `myapp`, and your `compose.yaml` looks like this: @@ -31,71 +25,65 @@ services: ports: - "8000:8000" db: - image: postgres + image: postgres:latest ports: - "8001:5432" ``` +Compose automatically connects all services to the default network, so you don't need to define `networks` explicitly in the Compose file. + When you run `docker compose up`, the following happens: -1. A network called `myapp_default` is created. -2. A container is created using `web`'s configuration. It joins the network - `myapp_default` under the name `web`. -3. A container is created using `db`'s configuration. It joins the network - `myapp_default` under the name `db`. +1. 
A network called `myapp_default` is created. +2. A container is created using `web`'s configuration. It joins `myapp_default` under the name `web`. +3. A container is created using `db`'s configuration. It joins `myapp_default` under the name `db`. -Each container can now look up the service name `web` or `db` and -get back the appropriate container's IP address. For example, `web`'s -application code could connect to the URL `postgres://db:5432` and start -using the Postgres database. +Each container can now look up the service name `web` or `db` and get back the appropriate container's IP address. The `web` service can connect to the database at `postgres://db:5432`. From the host machine, the same database is accessible at `postgres://localhost:8001` if your container is running locally. + +> [!TIP] +> +> Docker assigns container IP addresses dynamically from the network's subnet each time a container starts so they are not persisted across restarts or recreations. This means you should always reference services by name, not IP address. When containers are recreated, for example after a configuration change, they receive a new IP address. The service name stays stable. -It is important to note the distinction between `HOST_PORT` and `CONTAINER_PORT`. -In the above example, for `db`, the `HOST_PORT` is `8001` and the container port is -`5432` (postgres default). Networked service-to-service -communication uses the `CONTAINER_PORT`. When `HOST_PORT` is defined, -the service is accessible outside the swarm as well. +Your app's network is given a name based on the "project name", which is taken from the name of the directory it lives in. You can override the project name with either the [`--project-name` flag](/reference/cli/docker/compose/) or the [`COMPOSE_PROJECT_NAME` environment variable](environment-variables/envvars.md#compose_project_name). -Within the `web` container, your connection string to `db` would look like -`postgres://db:5432`, and from the host machine, the connection string would -look like `postgres://{DOCKER_IP}:8001` for example `postgres://localhost:8001` if your container is running locally. +The `HOST_PORT` and `CONTAINER_PORT` serve different purposes. In the example above, for `db`, the `HOST_PORT` is `8001` and the container port is `5432` (the Postgres default). Networked service-to-service communication uses the `CONTAINER_PORT`. The host port is only used when accessing the service from outside the network. -## Update containers on the network +### Updating containers on the network If you make a configuration change to a service and run `docker compose up` to update it, the old container is removed and the new one joins the network under a different IP address but the same name. Running containers can look up that name and connect to the new address, but the old address stops working. -If any containers have connections open to the old container, they are closed. It is a container's responsibility to detect this condition, look up the name again and reconnect. +If any containers have connections open to the old container, they are closed. It is each container's responsibility to detect this condition, look up the name again, and reconnect. -> [!TIP] -> -> Reference containers by name, not IP, whenever possible. Otherwise you’ll need to constantly update the IP address you use. - -## Link containers +## Change the network mode -Links allow you to define extra aliases by which a service is reachable from another service. 
They are not required to enable services to communicate. By default, any service can reach any other service at that service's name. In the following example, `db` is reachable from `web` at the hostnames `db` and `database`: +By default, each service joins the project's bridge network. It is the most secure networking mode. If you don't specify [`network_mode`](/reference/compose-file/services.md#network_mode), this is the type of network you are creating. -```yaml -services: - - web: - build: . - links: - - "db:database" - db: - image: postgres -``` +You can override the networking mode on a per-service basis. The `network_mode` option accepts the following values: -See the [links reference](/reference/compose-file/services.md#links) for more information. +- `host`: The container shares the host's network stack. No port mapping is needed or supported, and service name DNS resolution does not work. Use for system-level tools like network monitors that require direct access to host interfaces. A container using `network_mode: host` can access all host ports and observe all network traffic on the host. Use it only when genuinely required. +- `none`: Turns off all container networking. +- `service:{name}`: Gives the container access to the specified container by referring to its service name. +- `container:{name}`: Gives the container access to the specified container by referring to its container ID. -## Multi-host networking +You can mix modes in a single project: -When deploying a Compose application on a Docker Engine with [Swarm mode enabled](/manuals/engine/swarm/_index.md), -you can make use of the built-in `overlay` driver to enable multi-host communication. +```yaml +services: + app: + image: myapp + networks: + - isolated + ports: + - "3000:3000" -Overlay networks are always created as `attachable`. You can optionally set the [`attachable`](/reference/compose-file/networks.md#attachable) property to `false`. + monitoring: + image: netdata/netdata + network_mode: host # Can monitor host system and all host ports -Consult the [Swarm mode section](/manuals/engine/swarm/_index.md), to see how to set up -a Swarm cluster, and the [Getting started with multi-host networking](/manuals/engine/network/tutorials/overlay.md) -to learn about multi-host overlay networks. +networks: + isolated: + driver: bridge +``` ## Specify custom networks @@ -117,19 +105,17 @@ services: - frontend - backend db: - image: postgres + image: postgres:latest networks: - backend networks: frontend: - # Specify driver options - driver: bridge + driver: bridge # Specify driver options driver_opts: com.docker.network.bridge.host_binding_ipv4: "127.0.0.1" backend: - # Use a custom driver - driver: custom-driver + driver: custom-driver # Use a custom driver ``` Networks can be configured with static IP addresses by setting the [ipv4_address and/or ipv6_address](/reference/compose-file/services.md#ipv4_address-ipv6_address) for each attached network. @@ -145,7 +131,32 @@ networks: driver: custom-driver-1 ``` -## Configure the default network +### Internal networks + +Setting `internal: true` on a network creates it without a connection to the host's network interfaces. It has no default gateway for external connectivity. 
This is useful for services like databases that should be completely unreachable from outside the container network: + +```yaml +services: + cache: + image: redis + networks: + - isolated + + worker: + image: myworker + networks: + - isolated + - public + +networks: + isolated: + internal: true # No external connectivity + public: # Standard bridge network, created by Compose on docker compose up +``` + +Note that a service connected to both an internal and a non-internal network (like `worker` above) can still reach the internet via the non-internal network `public`. + +### Configure the default network Instead of, or as well as, specifying your own networks, you can also change the settings of the app-wide default network by defining an entry under `networks` named `default`: @@ -156,17 +167,17 @@ services: ports: - "8000:8000" db: - image: postgres + image: postgres:latest networks: default: - # Use a custom driver - driver: custom-driver-1 + driver: custom-driver-1 # Use a custom driver ``` -## Use a pre-existing network +## Use an existing external network + +If you've manually created a bridge network using `docker network create`, you can connect your Compose services to it by marking the network as [`external`](/reference/compose-file/networks.md#external): -If you want your containers to join a pre-existing network, use the [`external` option](/reference/compose-file/networks.md#external) ```yaml services: # ... @@ -176,9 +187,203 @@ networks: external: true ``` -Instead of attempting to create a network called `[projectname]_default`, Compose looks for a network called `my-pre-existing-network` and connects your app's containers to it. +Instead of creating `_default`, Compose looks for a network called `my-pre-existing-network` and connects your containers to it. + +### Connecting multiple Compose projects + +External networks are particularly useful when services in separate Compose projects need to communicate. Create a shared network once, then reference it as external in each project: + +```bash +docker network create inter-project +``` + +backend-compose.yaml: + +```yaml +services: + api: + image: myapi:latest + networks: + - shared + - default # Also keep the project's internal network + +networks: + shared: + external: true + name: inter-project +``` + +frontend-compose.yaml: + +```yaml +services: + web: + image: myfrontend:latest + environment: + API_URL: http://api:8080 # Reference by service name + networks: + - shared + +networks: + shared: + external: true + name: inter-project +``` + +Services on the same external network can reach each other by service name, just like services within a single project. + +> [!IMPORTANT] +> +> The external network must exist before you run `docker compose up`. If it doesn't, Compose fails with a `Network not found` error. Always create it first with `docker network create`. + +## Hybrid networking + +A service can belong to both an external shared network and its own project-internal network. 
This lets you expose only the services that need to be reachable from other projects, while keeping everything else, such as databases, fully isolated: + +```yaml +services: + api: + image: myapp-api + networks: + - shared # Reachable from other projects + - internal # Can also reach the database + + database: + image: postgres:latest + networks: + - internal # Not exposed on the shared network + +networks: + shared: + name: inter-project + external: true + internal: {} # Project-specific, isolated +``` + +## Custom DNS with `extra_hosts` + +You can add custom hostname-to-IP mappings to a container's `/etc/hosts` file using [`extra_hosts`](/reference/compose-file/services.md#extra_hosts). This is useful when a service needs to resolve a hostname that isn't registered in Docker's internal DNS. For example, a fixed-IP dependency or a staging endpoint: + +```yaml +services: + app: + image: myapp + extra_hosts: + - "api.staging:192.168.1.100" + - "cache.internal:192.168.1.101" +``` + +To map a hostname dynamically to the host machine's IP, use the special `host-gateway` value: + +```yaml +services: + app: + image: myapp + extra_hosts: + - "host.docker.internal:host-gateway" +``` + +On Linux, `host-gateway` resolves to the host's IP on the default bridge network. On Mac and Windows, Docker automatically provides this, `host-gateway` resolves to the same internal IP address as `host.docker.internal`. + +You can also drive `extra_hosts` from environment variables, which makes it easy to point services at different targets per environment: + +```yaml +services: + app: + image: myapp + extra_hosts: + - "api.service:${API_HOST:-127.0.0.1}" + - "auth.service:${AUTH_HOST:-127.0.0.1}" +``` + +Where `.env.development` might set `API_HOST=localhost` and a production env file might set `API_HOST=10.0.1.50`. + +To verify what has been injected, inspect the hosts file inside the container: + +```bash +$ docker compose exec app cat /etc/hosts +``` + +## Multi-host networking + +When deploying a Compose application on a Docker Engine with [Swarm mode enabled](/manuals/engine/swarm/_index.md), you can use the built-in `overlay` driver to enable multi-host communication. Overlay networks are always created as `attachable`. You can optionally set the [`attachable`](/reference/compose-file/networks.md#attachable) property to `false`. + +To learn more, see the [overlay network driver documentation](/manuals/engine/network/drivers/overlay.md). + +## Link containers + +Links allow you to define extra aliases by which a service is reachable from another service. They are not required for basic service-to-service communication. By default, any service can reach any other service at that service's name. In the following example, `db` is reachable from `web` at both the hostnames `db` and `database`: + +```yaml +services: + web: + build: . + links: + - "db:database" + db: + image: postgres:latest +``` + +See the [links reference](/reference/compose-file/services.md#links) for more information. + +## Debugging + +When a service can't reach another, work through the following steps in order: first confirm the network configuration looks right, then confirm the containers are actually attached, then test live connectivity. + +### Inspect port mappings + +To find out which host port maps to a container port, use `docker compose port`: + +```bash +# Which host port maps to container port 5432 on db? 
+$ docker compose port db 5432 +# Output: 0.0.0.0:8001 +``` + +This is especially useful when using dynamic port mapping, where the host port changes on every `docker compose up`: + +```yaml +services: + web: + image: nginx + ports: + - "80" # Docker assigns the host port dynamically +``` + +```bash +$ docker compose port web 80 +# Output: 0.0.0.0:55432 +``` + +When you scale a service, each replica gets its own dynamic port. Use `--index` to query a specific replica: + +```bash +$ docker compose up -d --scale web=3 + +$ docker compose port --index=1 web 80 # Output: 0.0.0.0:55001 +$ docker compose port --index=2 web 80 # Output: 0.0.0.0:55002 +$ docker compose port --index=3 web 80 # Output: 0.0.0.0:55003 +``` + +By default, `docker compose port` looks for TCP mappings. If a service exposes both TCP and UDP on the same port, use `--protocol`: + +```bash +$ docker compose port --protocol=udp myservice 53 +``` + +### Verify network membership + +To check which containers are attached to a network (useful when troubleshooting connectivity across external or custom networks): + +```bash +$ docker network inspect +``` + +### Check connectivity + +If the network membership looks correct but services still can't reach each other, test connectivity from inside a running container using `docker compose exec`. -## Further reference information +## Further reference information For full details of the network configuration options available, see the following references: diff --git a/content/manuals/compose/how-tos/oci-artifact.md b/content/manuals/compose/how-tos/oci-artifact.md index 0791df4e6f6..6a2a6c669ee 100644 --- a/content/manuals/compose/how-tos/oci-artifact.md +++ b/content/manuals/compose/how-tos/oci-artifact.md @@ -1,14 +1,9 @@ --- -title: Using Docker Compose with OCI artifacts +title: Package and deploy Docker Compose applications as OCI artifacts linkTitle: OCI artifact applications weight: 110 -description: How to publish and start Compose applications as OCI artifacts -keywords: cli, compose, oci, docker hub, artificats, publish, package, distribute -params: - sidebar: - badge: - color: green - text: New +description: Learn how to package, publish, and securely run Docker Compose applications from OCI-compliant registries. +keywords: cli, compose, oci, docker hub, artificats, publish, package, distribute, docker compose oci support --- {{< summary-bar feature_name="Compose OCI artifact" >}} @@ -18,7 +13,7 @@ Docker Compose supports working with [OCI artifacts](/manuals/docker-hub/repos/m ## Publish your Compose application as an OCI artifact To distribute your Compose application as an OCI artifact, you can use the `docker compose publish` command, to publish it to an OCI-compliant registry. -This allows others to deploy your application directly from the registry. +This allows others to then deploy your application directly from the registry. The publish function supports most of the composition capabilities of Compose, like overrides, extends or include, [with some limitations](#limitations). @@ -84,12 +79,12 @@ Are you ok to publish these environment variables? [y/N]: If you decline, the publish process stops without sending anything to the registry. -### Limitations +## Limitations -There is limitations to publishing Compose applications as OCI artifacts. You can't publish a Compose configuration: +There are limitations to publishing Compose applications as OCI artifacts. 
You can't publish a Compose configuration: - With service(s) containing bind mounts - With service(s) containing only a `build` section -- That includes local files with the `include` attribute. To publish successfully, ensure that any included local files are also published. You can then `include` to reference these files as remote `include` is supported. +- That includes local files with the `include` attribute. To publish successfully, ensure that any included local files are also published. You can then use `include` to reference these files as remote `include` is supported. ## Start an OCI artifact application @@ -147,3 +142,10 @@ The `docker compose publish` command supports non-interactive execution, letting ```console $ docker compose publish -y username/my-compose-app:latest ``` + +## Next steps + +- [Familiarize yourself with Compose's trust model](/manuals/compose/trust-model.md) +- [Learn about OCI artifacts in Docker Hub](/manuals/docker-hub/repos/manage/hub-images/oci-artifacts.md) +- [Compose publish command](/reference/cli/docker/compose/publish/) +- [Understand `include`](/reference/compose-file/include.md) diff --git a/content/manuals/compose/how-tos/production.md b/content/manuals/compose/how-tos/production.md index 0392c00ff9b..c520bf6105c 100644 --- a/content/manuals/compose/how-tos/production.md +++ b/content/manuals/compose/how-tos/production.md @@ -1,6 +1,6 @@ --- -description: Guide to using Docker Compose in production -keywords: compose, orchestration, containers, production +description: Learn how to configure, deploy, and update Docker Compose applications for production environments. +keywords: compose, orchestration, containers, production, production docker compose configuration title: Use Compose in production weight: 100 aliases: @@ -29,8 +29,8 @@ production. These changes might include: - Adding extra services such as a log aggregator For this reason, consider defining an additional Compose file, for example -`compose.production.yaml`, which specifies production-appropriate -configuration. This configuration file only needs to include the changes you want to make from the original Compose file. The additional Compose file +`compose.production.yaml`, with production-specific +configuration details. This configuration file only needs to include the changes you want to make from the original Compose file. The additional Compose file is then applied over the original `compose.yaml` to create a new configuration. Once you have a second configuration file, you can use it with the @@ -55,7 +55,7 @@ $ docker compose up --no-deps -d web This first command rebuilds the image for `web` and then stops, destroys, and recreates just the `web` service. The `--no-deps` flag prevents Compose from also -recreating any services which `web` depends on. +recreating any services that `web` depends on. ### Running Compose on a single server @@ -65,3 +65,9 @@ appropriately. For more information, see [pre-defined environment variables](env Once you've set up your environment variables, all the normal `docker compose` commands work with no further configuration. 
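As a minimal sketch of the workflow this page describes, assuming the override file is named `compose.production.yaml` as above, a deployment applies both files together:

```console
$ docker compose -f compose.yaml -f compose.production.yaml up -d
```

Compose merges the files in the order they are given, so the production file only needs to declare the settings that differ from the base configuration.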
+ +## Next steps + +- [Familiarize yourself with Compose's trust model](/manuals/compose/trust-model.md) +- [Using multiple Compose files](multiple-compose-files/_index.md) + diff --git a/content/manuals/compose/how-tos/profiles.md b/content/manuals/compose/how-tos/profiles.md index 5d90153606b..74641023c18 100644 --- a/content/manuals/compose/how-tos/profiles.md +++ b/content/manuals/compose/how-tos/profiles.md @@ -50,7 +50,7 @@ Valid profiles names follow the regex format of `[a-zA-Z0-9][a-zA-Z0-9_.-]+`. ## Start specific profiles -To start a specific profile supply the `--profile` [command-line option](/reference/cli/docker/compose.md) or +To start a specific profile supply the `--profile` [command-line option](/reference/cli/docker/compose/) or use the [`COMPOSE_PROFILES` environment variable](environment-variables/envvars.md#compose_profiles): ```console @@ -85,6 +85,12 @@ If you want to enable all profiles at the same time, you can run `docker compose ## Auto-starting profiles and dependency resolution +When you explicitly target a service on the command line that has one or more profiles assigned, you do not need to enable the profile manually as Compose runs that service regardless of whether its profile is activated. This is useful for running one-off services or debugging tools. + +Only the targeted service (and any of its declared dependencies via `depends_on`) is started. Other services that share the same profile will not be started unless: +- They are also explicitly targeted, or +- The profile is explicitly enabled using `--profile` or `COMPOSE_PROFILES`. + When a service with assigned `profiles` is explicitly targeted on the command line its profiles are started automatically so you don't need to start them manually. This can be used for one-off services and debugging tools. @@ -108,76 +114,23 @@ services: ``` ```sh -# Only start backend and db +# Only start backend and db (no profiles involved) $ docker compose up -d -# This runs db-migrations (and, if necessary, start db) -# by implicitly enabling the profiles "tools" +# Run the db-migrations service without manually enabling the 'tools' profile $ docker compose run db-migrations ``` -But keep in mind that `docker compose` only automatically starts the -profiles of the services on the command line and not of any dependencies. - -This means that any other services the targeted service `depends_on` should either: -- Share a common profile -- Always be started, by omitting `profiles` or having a matching profile started explicitly - -```yaml -services: - web: - image: web - - mock-backend: - image: backend - profiles: ["dev"] - depends_on: - - db - - db: - image: mysql - profiles: ["dev"] - - phpmyadmin: - image: phpmyadmin - profiles: ["debug"] - depends_on: - - db -``` - -```sh -# Only start "web" -$ docker compose up -d - -# Start mock-backend (and, if necessary, db) -# by implicitly enabling profiles "dev" -$ docker compose up -d mock-backend - -# This fails because profiles "dev" is not enabled -$ docker compose up phpmyadmin -``` - -Although targeting `phpmyadmin` automatically starts the profiles `debug`, it doesn't automatically start the profiles required by `db` which is `dev`. +In this example, `db-migrations` runs even though it is assigned to the tools profile, because it was explicitly targeted. The `db` service is also started automatically because it is listed in `depends_on`. 
-To fix this you either have to add the `debug` profile to the `db` service: - -```yaml -db: - image: mysql - profiles: ["debug", "dev"] -``` - -or start the `dev` profile explicitly: - -```console -# Profiles "debug" is started automatically by targeting phpmyadmin -$ docker compose --profile dev up phpmyadmin -$ COMPOSE_PROFILES=dev docker compose up phpmyadmin -``` +If the targeted service has dependencies that are also gated behind a profile, you must ensure those dependencies are either: + - In the same profile + - Started separately + - Not assigned to any profile so are always enabled ## Stop application and services with specific profiles -As with starting specific profiles, you can use the `--profile` [command-line option](/reference/cli/docker/compose.md#use--p-to-specify-a-project-name) or +As with starting specific profiles, you can use the `--profile` [command-line option](/reference/cli/docker/compose/#use--p-to-specify-a-project-name) or use the [`COMPOSE_PROFILES` environment variable](environment-variables/envvars.md#compose_profiles): ```console @@ -208,6 +161,7 @@ services: ``` if you only want to stop the `phpmyadmin` service, you can run + ```console $ docker compose down phpmyadmin ``` diff --git a/content/manuals/compose/how-tos/project-name.md b/content/manuals/compose/how-tos/project-name.md index 18372aa7cc5..be63fd17e40 100644 --- a/content/manuals/compose/how-tos/project-name.md +++ b/content/manuals/compose/how-tos/project-name.md @@ -1,20 +1,20 @@ --- title: Specify a project name weight: 10 -description: Understand the different ways you can set a project name in Compose and what the precedence is. +description: Learn how to set a custom project name in Compose and understand the precedence of each method. keywords: name, compose, project, -p flag, name top-level element aliases: - /compose/project-name/ --- -In Compose, the default project name is derived from the base name of the project directory. However, you have the flexibility to set a custom project name. +By default, Compose assigns the project name based on the name of the directory that contains the Compose file. You can override this with several methods. This page offers examples of scenarios where custom project names can be helpful, outlines the various methods to set a project name, and provides the order of precedence for each approach. > [!NOTE] > > The default project directory is the base directory of the Compose file. A custom value can also be set -> for it using the [`--project-directory` command line option](/reference/cli/docker/compose.md#use--p-to-specify-a-project-name). +> for it using the [`--project-directory` command line option](/reference/cli/docker/compose/#options). ## Example use cases @@ -42,4 +42,4 @@ The precedence order for each method, from highest to lowest, is as follows: ## What's next? - Read up on [working with multiple Compose files](multiple-compose-files/_index.md). -- Explore some [sample apps](samples-for-compose.md). +- Explore some [sample apps](https://github.com/docker/awesome-compose). 
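As a brief sketch of the precedence described above, using a hypothetical project name `acme-staging`:

```console
# Highest precedence: the -p command-line flag
$ docker compose -p acme-staging up -d

# Next in precedence: the COMPOSE_PROJECT_NAME environment variable
$ COMPOSE_PROJECT_NAME=acme-staging docker compose up -d
```

If neither is set, Compose falls back to the top-level `name:` attribute in the Compose file, and finally to the base name of the project directory.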
diff --git a/content/manuals/compose/how-tos/provider-services.md b/content/manuals/compose/how-tos/provider-services.md index fa1ddbb3dad..4d9eb7a5810 100644 --- a/content/manuals/compose/how-tos/provider-services.md +++ b/content/manuals/compose/how-tos/provider-services.md @@ -3,11 +3,6 @@ title: Use provider services description: Learn how to use provider services in Docker Compose to integrate external capabilities into your applications keywords: compose, docker compose, provider, services, platform capabilities, integration, model runner, ai weight: 112 -params: - sidebar: - badge: - color: green - text: New --- {{< summary-bar feature_name="Compose provider services" >}} @@ -15,7 +10,6 @@ params: Docker Compose supports provider services, which allow integration with services whose lifecycles are managed by third-party components rather than by Compose itself. This feature enables you to define and utilize platform-specific services without the need for manual setup or direct lifecycle management. - ## What are provider services? Provider services are a special type of service in Compose that represents platform capabilities rather than containers. @@ -82,6 +76,7 @@ The `type` field in a provider service references the name of either: 1. A Docker CLI plugin (e.g., `docker-model`) 2. A binary available in the user's PATH +3. A path to the binary or script to execute When Compose encounters a provider service, it looks for a plugin or binary with the specified name to handle the provisioning of the requested capability. @@ -104,6 +99,10 @@ The plugin or binary is responsible for: This information is then passed to dependent services as environment variables. +> [!TIP] +> +> If you're working with AI models in Compose, use the [`models` top-level element](/manuals/ai/compose/models-and-compose.md) instead. + ## Benefits of using provider services Using provider services in your Compose applications offers several benefits: diff --git a/content/manuals/compose/how-tos/startup-order.md b/content/manuals/compose/how-tos/startup-order.md index 2234fff1569..156eda0d7eb 100644 --- a/content/manuals/compose/how-tos/startup-order.md +++ b/content/manuals/compose/how-tos/startup-order.md @@ -1,6 +1,6 @@ --- -description: How to control service startup and shutdown order in Docker Compose -keywords: documentation, docs, docker, compose, startup, shutdown, order +description: Learn how to manage service startup and shutdown order in Docker Compose using depends_on and healthchecks. +keywords: docker compose startup order, compose shutdown order, depends_on, service healthcheck, control service dependencies title: Control startup and shutdown order in Compose linkTitle: Control startup order weight: 30 @@ -13,7 +13,7 @@ You can control the order of service startup and shutdown with the containers in dependency order, where dependencies are determined by `depends_on`, `links`, `volumes_from`, and `network_mode: "service:..."`. -A good example of when you might use this is an application which needs to access a database. If both services are started with `docker compose up`, there is a chance this will fail since the application service might start before the database service and won't find a database able to handle its SQL statements. 
+For example, if your application needs to access a database and both services are started with `docker compose up`, there is a chance this will fail since the application service might start before the database service and won't find a database able to handle its SQL statements. ## Control startup @@ -40,9 +40,9 @@ services: redis: image: redis db: - image: postgres + image: postgres:18 healthcheck: - test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"] + test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"] interval: 10s retries: 5 start_period: 30s diff --git a/content/manuals/compose/how-tos/use-secrets.md b/content/manuals/compose/how-tos/use-secrets.md index 63680e6ac35..2afec05e530 100644 --- a/content/manuals/compose/how-tos/use-secrets.md +++ b/content/manuals/compose/how-tos/use-secrets.md @@ -1,9 +1,9 @@ --- -title: How to use secrets in Docker Compose +title: Manage secrets securely in Docker Compose linkTitle: Secrets in Compose weight: 60 -description: How to use secrets in Compose and their benefits -keywords: secrets, compose, security, environment variables +description: Learn how to securely manage runtime and build-time secrets in Docker Compose. +keywords: secrets, compose, security, environment variables, docker secrets, secure Docker builds, sensitive data in containers tags: [Secrets] aliases: - /compose/use-secrets/ @@ -25,7 +25,7 @@ Unlike the other methods, this permits granular access control within a service ## Examples -### Simple +### Single-service secret injection In the following example, the frontend service is given access to the `my_secret` secret. In the container, `/run/secrets/my_secret` is set to the contents of the file `./my_secret.txt`. @@ -40,7 +40,7 @@ secrets: file: ./my_secret.txt ``` -### Advanced +### Multi-service secret sharing and password management ```yaml services: @@ -84,7 +84,7 @@ In the advanced example above: - The `secrets` attribute under each service defines the secrets you want to inject into the specific container. - The top-level `secrets` section defines the variables `db_password` and `db_root_password` and provides the `file` that populates their values. -- The deployment of each container means Docker creates a temporary filesystem mount under `/run/secrets/` with their specific values. +- The deployment of each container means Docker creates a bind mount under `/run/secrets/` with their specific values. 
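To check what a container actually receives, read the mounted file from inside the running service. This is a quick verification step, assuming a service named `db` that has been granted the `db_root_password` secret as in the example above:

```console
$ docker compose exec db cat /run/secrets/db_root_password
```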
> [!NOTE] > @@ -109,6 +109,7 @@ secrets: ## Resources +- [Familiarize yourself with Compose's trust model](/manuals/compose/trust-model.md) - [Secrets top-level element](/reference/compose-file/secrets.md) - [Secrets attribute for services top-level element](/reference/compose-file/services.md#secrets) - [Build secrets](https://docs.docker.com/build/building/secrets/) diff --git a/content/manuals/compose/images/quick-hello-world-1.png b/content/manuals/compose/images/quick-hello-world-1.png deleted file mode 100644 index 9c0e99acfaf..00000000000 Binary files a/content/manuals/compose/images/quick-hello-world-1.png and /dev/null differ diff --git a/content/manuals/compose/images/quick-hello-world-2.png b/content/manuals/compose/images/quick-hello-world-2.png deleted file mode 100644 index 65841c0b9cf..00000000000 Binary files a/content/manuals/compose/images/quick-hello-world-2.png and /dev/null differ diff --git a/content/manuals/compose/images/quick-hello-world-3.png b/content/manuals/compose/images/quick-hello-world-3.png deleted file mode 100644 index ed500e020c3..00000000000 Binary files a/content/manuals/compose/images/quick-hello-world-3.png and /dev/null differ diff --git a/content/manuals/compose/images/v1-versus-v2-versus-v5.png b/content/manuals/compose/images/v1-versus-v2-versus-v5.png new file mode 100644 index 00000000000..4e4caecd56b Binary files /dev/null and b/content/manuals/compose/images/v1-versus-v2-versus-v5.png differ diff --git a/content/manuals/compose/images/v1-versus-v2.png b/content/manuals/compose/images/v1-versus-v2.png deleted file mode 100644 index 293d799525b..00000000000 Binary files a/content/manuals/compose/images/v1-versus-v2.png and /dev/null differ diff --git a/content/manuals/compose/install/_index.md b/content/manuals/compose/install/_index.md index 750c05b10d1..510942e8f98 100644 --- a/content/manuals/compose/install/_index.md +++ b/content/manuals/compose/install/_index.md @@ -1,9 +1,7 @@ --- description: Learn how to install Docker Compose. Compose is available natively on Docker Desktop, as a Docker Engine plugin, and as a standalone tool. -keywords: install docker compose, docker compose install, install docker compose ubuntu, - installing docker compose, docker compose download, docker compose not found, docker - compose windows, how to install docker compose +keywords: install docker compose, docker compose plugin, install compose linux, install docker desktop, docker compose windows, standalone docker compose, docker compose not found title: Overview of installing Docker Compose linkTitle: Install weight: 20 @@ -18,7 +16,7 @@ This page summarizes the different ways you can install Docker Compose, dependin ## Installation scenarios -### Scenario one: Install Docker Desktop (Recommended) +### Docker Desktop (Recommended) The easiest and recommended way to get Docker Compose is to install Docker Desktop. @@ -33,7 +31,7 @@ Docker Desktop is available for: > > If you have already installed Docker Desktop, you can check which version of Compose you have by selecting **About Docker Desktop** from the Docker menu {{< inline-image src="../../desktop/images/whale-x.svg" alt="whale menu" >}}. 
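You can also confirm the installed version from a terminal. The same command works whether Compose came with Docker Desktop or was installed as a CLI plugin:

```console
$ docker compose version
```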
-### Scenario two: Install the Docker Compose plugin (Linux only) +### Plugin (Linux only) > [!IMPORTANT] > @@ -43,7 +41,7 @@ If you already have Docker Engine and Docker CLI installed, you can install the - [Using Docker's repository](linux.md#install-using-the-repository) - [Downloading and installing manually](linux.md#install-the-plugin-manually) -### Scenario three: Install the Docker Compose standalone (Legacy) +### Standalone (Legacy) > [!WARNING] > diff --git a/content/manuals/compose/install/linux.md b/content/manuals/compose/install/linux.md index d1b47fc2d13..61e099fcb10 100644 --- a/content/manuals/compose/install/linux.md +++ b/content/manuals/compose/install/linux.md @@ -1,10 +1,6 @@ --- -description: Download and install Docker Compose on Linux with this step-by-step handbook. - This plugin can be installed manually or by using a repository. -keywords: install docker compose linux, docker compose linux, docker compose plugin, - docker-compose-plugin, linux install docker compose, install docker-compose linux, - linux install docker-compose, linux docker compose, docker compose v2 linux, install - docker compose on linux +description: Step-by-step instructions for installing the Docker Compose plugin on Linux using a package repository or manual method. +keywords: install docker compose linux, docker compose plugin, docker-compose-plugin linux, docker compose v2, docker compose manual install, linux docker compose toc_max: 3 title: Install the Docker Compose plugin linkTitle: Plugin @@ -34,8 +30,7 @@ To install the Docker Compose plugin on Linux, you can either: [Debian](/manuals/engine/install/debian.md#install-using-the-repository) | [Raspberry Pi OS](/manuals/engine/install/raspberry-pi-os.md#install-using-the-repository) | [Fedora](/manuals/engine/install/fedora.md#set-up-the-repository) | - [RHEL](/manuals/engine/install/rhel.md#set-up-the-repository) | - [SLES](/manuals/engine/install/sles.md#set-up-the-repository). + [RHEL](/manuals/engine/install/rhel.md#set-up-the-repository). 2. Update the package index, and install the latest version of Docker Compose: @@ -77,9 +72,9 @@ To update the Docker Compose plugin, run the following commands: ## Install the plugin manually -> [!IMPORTANT] +> [!WARNING] > -> This option requires you to manage upgrades manually. It is recommended that you set up Docker's repository for easier maintenance. +> Manual installations don’t auto-update. For ease of maintenance, use the Docker repository method. 1. To download and install the Docker Compose CLI plugin, run: @@ -113,4 +108,8 @@ To update the Docker Compose plugin, run the following commands: ```console $ docker compose version ``` - \ No newline at end of file + +## What's next? 
+ +- [Understand how Compose works](/manuals/compose/intro/compose-application-model.md) +- [Try the Quickstart guide](/manuals/compose/gettingstarted.md) diff --git a/content/manuals/compose/install/standalone.md b/content/manuals/compose/install/standalone.md index d878c2daf74..add1a100570 100644 --- a/content/manuals/compose/install/standalone.md +++ b/content/manuals/compose/install/standalone.md @@ -1,18 +1,26 @@ --- -title: Install the Docker Compose standalone -linkTitle: Standalone -description: How to install Docker Compose - Other Scenarios -keywords: compose, orchestration, install, installation, docker, documentation +title: Install the Docker Compose standalone (Legacy) +linkTitle: Standalone (Legacy) +description: Instructions for installing the legacy Docker Compose standalone tool on Linux and Windows Server +keywords: install docker-compose, standalone docker compose, docker-compose windows server, install docker compose linux, legacy compose install toc_max: 3 weight: 20 --- +> [!WARNING] +> +> This install scenario is not recommended and is only supported for backward compatibility purposes. +> Use [Docker Desktop](/manuals/desktop/_index.md) or the +> [Docker Compose plugin](/manuals/compose/install/linux.md) instead. +> Use the standalone binary only if you cannot use either of these options. + This page contains instructions on how to install Docker Compose standalone on Linux or Windows Server, from the command line. > [!WARNING] > > The Docker Compose standalone uses the `-compose` syntax instead of the current standard syntax `compose`. > For example, you must type `docker-compose up` when using Docker Compose standalone, instead of `docker compose up`. +> Use it only for backward compatibility. ## On Linux @@ -35,6 +43,7 @@ This page contains instructions on how to install Docker Compose standalone on L > If the command `docker-compose` fails after installation, check your path. > You can also create a symbolic link to `/usr/bin` or any other directory in your path. > For example: +> > ```console > $ sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose > ``` @@ -47,14 +56,14 @@ on Microsoft Windows Server](/manuals/engine/install/binaries.md#install-server- 1. Run PowerShell as an administrator. In order to proceed with the installation, select **Yes** when asked if you want this app to make changes to your device. -2. Optional. Ensure TLS1.2 is enabled. +2. Optional. Ensure TLS1.2 is enabled. GitHub requires TLS1.2 for secure connections. If you’re using an older version of Windows Server, for example 2016, or suspect that TLS1.2 is not enabled, run the following command in PowerShell: ```powershell [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 ``` -3. Download the latest release of Docker Compose ({{% param "compose_version" %}}). Run the following command: +3. Download the latest release of Docker Compose ({{% param "compose_version" %}}). Run the following command: ```powershell Start-BitsTransfer -Source "https://github.com/docker/compose/releases/download/{{% param "compose_version" %}}/docker-compose-windows-x86_64.exe" -Destination $Env:ProgramFiles\Docker\docker-compose.exe @@ -65,8 +74,8 @@ on Microsoft Windows Server](/manuals/engine/install/binaries.md#install-server- > [!NOTE] > > On Windows Server 2019 you can add the Compose executable to `$Env:ProgramFiles\Docker`. 
- Because this directory is registered in the system `PATH`, you can run the `docker-compose --version` - command on the subsequent step with no additional configuration. + > Because this directory is registered in the system `PATH`, you can run the `docker-compose --version` + > command on the subsequent step with no additional configuration. 4. Test the installation. @@ -74,3 +83,8 @@ on Microsoft Windows Server](/manuals/engine/install/binaries.md#install-server- $ docker-compose.exe version Docker Compose version {{% param "compose_version" %}} ``` + +## What's next? + +- [Understand how Compose works](/manuals/compose/intro/compose-application-model.md) +- [Try the Quickstart guide](/manuals/compose/gettingstarted.md) diff --git a/content/manuals/compose/install/uninstall.md b/content/manuals/compose/install/uninstall.md index 16585ab8b22..714389deda0 100644 --- a/content/manuals/compose/install/uninstall.md +++ b/content/manuals/compose/install/uninstall.md @@ -2,6 +2,7 @@ description: How to uninstall Docker Compose keywords: compose, orchestration, uninstall, uninstallation, docker, documentation title: Uninstall Docker Compose +linkTitle: Uninstall --- How you uninstall Docker Compose depends on how it was installed. This guide covers uninstallation instructions for: @@ -13,7 +14,7 @@ How you uninstall Docker Compose depends on how it was installed. This guide cov If you want to uninstall Docker Compose and you have installed Docker Desktop, see [Uninstall Docker Desktop](/manuals/desktop/uninstall.md). -> [!NOTE] +> [!WARNING] > > Unless you have other Docker instances installed on that specific environment, uninstalling Docker Desktop removes all Docker components, including Docker Engine, Docker CLI, and Docker Compose. diff --git a/content/manuals/compose/intro/compose-application-model.md b/content/manuals/compose/intro/compose-application-model.md index 127e99501a9..80f7c5658c8 100644 --- a/content/manuals/compose/intro/compose-application-model.md +++ b/content/manuals/compose/intro/compose-application-model.md @@ -1,8 +1,8 @@ --- title: How Compose works weight: 10 -description: Understand how Compose works and the Compose application model with an illustrative example -keywords: compose, docker compose, compose specification, compose model +description: Learn how Docker Compose works, from the application model to Compose files and CLI, whilst following a detailed example. +keywords: docker compose, compose.yaml, docker compose model, compose cli, multi-container application, compose example aliases: - /compose/compose-file/02-model/ - /compose/compose-yaml-file/ @@ -21,7 +21,7 @@ Services communicate with each other through [networks](/reference/compose-file/ Services store and share persistent data into [volumes](/reference/compose-file/volumes.md). The Specification describes such a persistent data as a high-level filesystem mount with global options. -Some services require configuration data that is dependent on the runtime or platform. For this, the Specification defines a dedicated [configs](/reference/compose-file/configs.md) concept. From a service container point of view, configs are comparable to volumes, in that they are files mounted into the container. But the actual definition involves distinct platform resources and services, which are abstracted by this type. +Some services require configuration data that is dependent on the runtime or platform. For this, the Specification defines a dedicated [configs](/reference/compose-file/configs.md) concept. 
From inside the container, configs behave like volumes—they’re mounted as files. However, configs are defined differently at the platform level. A [secret](/reference/compose-file/secrets.md) is a specific flavor of configuration data for sensitive data that should not be exposed without security considerations. Secrets are made available to services as files mounted into their containers, but the platform-specific resources to provide sensitive data are specific enough to deserve a distinct concept and definition within the Compose Specification. @@ -47,7 +47,7 @@ You can use [fragments](/reference/compose-file/fragments.md) and [extensions](/ Multiple Compose files can be [merged](/reference/compose-file/merge.md) together to define the application model. The combination of YAML files is implemented by appending or overriding YAML elements based on the Compose file order you set. Simple attributes and maps get overridden by the highest order Compose file, lists get merged by appending. Relative -paths are resolved based on the first Compose file's parent folder, whenever complimentary files being +paths are resolved based on the first Compose file's parent folder, whenever complementary files being merged are hosted in other folders. As some Compose file elements can both be expressed as single strings or complex objects, merges apply to the expanded form. For more information, see [Working with multiple Compose files](/manuals/compose/how-tos/multiple-compose-files/_index.md). @@ -55,7 +55,9 @@ If you want to reuse other Compose files, or factor out parts of your applicatio ## CLI -The Docker CLI lets you interact with your Docker Compose applications through the `docker compose` command, and its subcommands. Using the CLI, you can manage the lifecycle of your multi-container applications defined in the `compose.yaml` file. The CLI commands enable you to start, stop, and configure your applications effortlessly. +The Docker CLI lets you interact with your Docker Compose applications through the `docker compose` command and its subcommands. If you're using Docker Desktop, the Docker Compose CLI is included by default. + +Using the CLI, you can manage the lifecycle of your multi-container applications defined in the `compose.yaml` file. The CLI commands enable you to start, stop, and configure your applications effortlessly. ### Key commands @@ -83,7 +85,7 @@ To list all the services along with their current status: $ docker compose ps ``` -For a full list of all the Compose CLI commands, see the [reference documentation](/reference/cli/docker/compose/_index.md). +For a full list of all the Compose CLI commands, see the [reference documentation](/reference/cli/docker/compose/). 
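Taken together, a typical lifecycle looks something like the following sketch, run from the directory that contains your `compose.yaml` file (flags shown are optional conveniences):

```console
$ docker compose up -d      # create and start all services in the background
$ docker compose ps         # list the services and their current status
$ docker compose logs       # view aggregated output from the services
$ docker compose down       # stop and remove the containers and networks
```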
## Illustrative example @@ -101,11 +103,11 @@ Both services communicate with each other on an isolated back-tier network, whil The example application is composed of the following parts: -- 2 services, backed by Docker images: `webapp` and `database` -- 1 secret (HTTPS certificate), injected into the frontend -- 1 configuration (HTTP), injected into the frontend -- 1 persistent volume, attached to the backend -- 2 networks +- Two services, backed by Docker images: `webapp` and `database` +- One secret (HTTPS certificate), injected into the frontend +- One configuration (HTTP), injected into the frontend +- One persistent volume, attached to the backend +- Two networks ```yml services: @@ -162,6 +164,6 @@ example-backend-1 example/database "docker-entrypoint.s…" backend ## What's next -- [Quickstart](/manuals/compose/gettingstarted.md) -- [Explore some sample applications](/manuals/compose/support-and-feedback/samples-for-compose.md) +- [Try the Quickstart guide](/manuals/compose/gettingstarted.md) +- [Explore some sample applications](https://github.com/docker/awesome-compose) - [Familiarize yourself with the Compose Specification](/reference/compose-file/_index.md) diff --git a/content/manuals/compose/intro/features-uses.md b/content/manuals/compose/intro/features-uses.md index 1545bd81407..0860ff42962 100644 --- a/content/manuals/compose/intro/features-uses.md +++ b/content/manuals/compose/intro/features-uses.md @@ -1,26 +1,24 @@ --- -description: Key benefits and use cases of Docker Compose -keywords: documentation, docs, docker, compose, orchestration, containers, uses, benefits +description: Discover the benefits and typical use cases of Docker Compose for containerized application development and deployment +keywords: docker compose, compose use cases, compose benefits, container orchestration, development environments, testing containers, yaml file title: Why use Compose? weight: 20 -aliases: -- /compose/features-uses/ +aliases: + - /compose/features-uses/ --- ## Key benefits of Docker Compose Using Docker Compose offers several benefits that streamline the development, deployment, and management of containerized applications: -- Simplified control: Docker Compose allows you to define and manage multi-container applications in a single YAML file. This simplifies the complex task of orchestrating and coordinating various services, making it easier to manage and replicate your application environment. +- Simplified control: Define and manage multi-container apps in one YAML file, streamlining orchestration and replication. -- Efficient collaboration: Docker Compose configuration files are easy to share, facilitating collaboration among developers, operations teams, and other stakeholders. This collaborative approach leads to smoother workflows, faster issue resolution, and increased overall efficiency. +- Efficient collaboration: Shareable YAML files support smooth collaboration between developers and operations, improving workflows and issue resolution, leading to increased overall efficiency. -- Rapid application development: Compose caches the configuration used to create a container. When you restart a service that has not changed, Compose re-uses the existing containers. Re-using containers means that you can make changes to your environment very quickly. +- Rapid application development: Compose caches the configuration used to create a container. When you restart a service that has not changed, Compose reuses the existing containers. 
Reusing containers means that you can make changes to your environment quickly. - Portability across environments: Compose supports variables in the Compose file. You can use these variables to customize your composition for different environments, or different users. -- Extensive community and support: Docker Compose benefits from a vibrant and active community, which means abundant resources, tutorials, and support. This community-driven ecosystem contributes to the continuous improvement of Docker Compose and helps users troubleshoot issues effectively. - ## Common use cases of Docker Compose Compose can be used in many different ways. Some common use cases are outlined @@ -57,8 +55,9 @@ $ docker compose down ### Single host deployments -Compose has traditionally been focused on development and testing workflows, -but with each release we're making progress on more production-oriented features. +Compose supports production deployments on single hosts. You can use +Compose to deploy applications to remote Docker hosts and manage +production-specific configurations. For details on using production-oriented features, see [Compose in production](/manuals/compose/how-tos/production.md). @@ -67,4 +66,4 @@ For details on using production-oriented features, see - [Learn about the history of Compose](history.md) - [Understand how Compose works](compose-application-model.md) -- [Quickstart](../gettingstarted.md) +- [Try the Quickstart guide](../gettingstarted.md) diff --git a/content/manuals/compose/intro/history.md b/content/manuals/compose/intro/history.md index 862cb9eb12b..f5aab1d24f4 100644 --- a/content/manuals/compose/intro/history.md +++ b/content/manuals/compose/intro/history.md @@ -1,7 +1,7 @@ --- title: History and development of Docker Compose linkTitle: History and development -description: History of Compose v1 and Compose YAML schema versioning +description: Explore the evolution of Docker Compose from v1 to v5, including CLI changes, YAML versioning, and the Compose Specification. keywords: compose, compose yaml, swarm, migration, compatibility, docker compose vs docker-compose weight: 30 aliases: @@ -10,41 +10,48 @@ aliases: This page provides: - A brief history of the development of the Docker Compose CLI - - A clear explanation of the major versions and file formats that make up Compose v1 and Compose v2 - - The main differences between Compose V1 and Compose v2 + - A clear explanation of the major versions and file formats that make up Compose v1, v2, and v5 + - The main differences between Compose v1, v2, and v5 ## Introduction -![Image showing the main differences between Compose v1 and Compose v2](../images/v1-versus-v2.png) +![Image showing the main differences between Compose v1, Compose v2, and Compose v5](../images/v1-versus-v2-versus-v5.png) -The previous image shows that the currently supported version of the Docker Compose CLI is Compose v2 which is defined by the [Compose Specification](/reference/compose-file/_index.md). +The diagram above highlights the key differences between Docker Compose v1, v2, and v5. Today, the supported Docker Compose CLI versions are Compose v2 and Compose v5, both of which are defined by the [Compose Specification](/reference/compose-file/_index.md). -It also provides a quick snapshot of the differences in file formats, command-line syntax, and top-level elements. This is covered in more detail in the following sections. +The diagram provides a high-level comparison of file formats, command-line syntax, and supported top-level elements. 
This is covered in more detail in the following sections. ### Docker Compose CLI versioning -Version one of the Docker Compose command-line binary was first released in 2014. It was written in Python, and is invoked with `docker-compose`. -Typically, Compose V1 projects include a top-level `version` element in the `compose.yaml` file, with values ranging from `2.0` to `3.8`, which refer to the specific [file formats](#compose-file-format-versioning). +Compose v1 was first released in 2014. It was written in Python and invoked with `docker-compose`. +Typically, Compose v1 projects include a top-level `version` element in the `compose.yaml` file, with values ranging from `2.0` to `3.8`, which refer to the specific [file formats](#compose-file-format-versioning). -Version two of the Docker Compose command-line binary was announced in 2020, is written in Go, and is invoked with `docker compose`. -Compose v2 ignores the `version` top-level element in the `compose.yaml` file. +Compose v2, announced in 2020, is written in Go and is invoked with `docker compose`. +Unlike v1, Compose v2 ignores the `version` top-level element in the `compose.yaml` file and relies entirely on the Compose Specification to interpret the file. + +Compose v5, released in 2025, is functionally identical to Compose v2. Its primary distinction is the introduction of an official [Go SDK](/manuals/compose/compose-sdk.md). This SDK provides a comprehensive API that lets you integrate Compose functionality directly into your applications, allowing you to load, validate, and manage multi-container environments without relying on the Compose CLI. To avoid confusion with the legacy Compose file formats labeled “v2” and “v3,” the versioning was advanced directly to v5. ### Compose file format versioning The Docker Compose CLIs are defined by specific file formats. -Three major versions of the Compose file format for Compose V1 were released: +Three major versions of the Compose file format for Compose v1 were released: - Compose file format 1 with Compose 1.0.0 in 2014 - Compose file format 2.x with Compose 1.6.0 in 2016 - Compose file format 3.x with Compose 1.10.0 in 2017 Compose file format 1 is substantially different to all the following formats as it lacks a top-level `services` key. -Its usage is historical and files written in this format don't run with Compose v2. +Its usage is historical and files written in this format don't run with Compose v2 or v5. Compose file format 2.x and 3.x are very similar to each other, but the latter introduced many new options targeted at Swarm deployments. To address confusion around Compose CLI versioning, Compose file format versioning, and feature parity depending on whether Swarm mode was in use, file format 2.x and 3.x were merged into the [Compose Specification](/reference/compose-file/_index.md). -Compose v2 uses the Compose Specification for project definition. Unlike the prior file formats, the Compose Specification is rolling and makes the `version` top-level element optional. Compose v2 also makes use of optional specifications - [Deploy](/reference/compose-file/deploy.md), [Develop](/reference/compose-file/develop.md), and [Build](/reference/compose-file/build.md). +Compose v2 and v5 use the Compose Specification for project definition. Unlike the prior file formats, the Compose Specification is rolling and makes the `version` top-level element optional.
Compose v2 and v5 also make use of optional specifications - [Deploy](/reference/compose-file/deploy.md), [Develop](/reference/compose-file/develop.md), and [Build](/reference/compose-file/build.md). + +To make migration easier, Compose v2 and v5 have backwards compatibility for certain elements that have been deprecated or changed between Compose file format 2.x/3.x and the Compose Specification. + +## What's next? -To make [migration](/manuals/compose/releases/migrate.md) easier, Compose v2 has backwards compatibility for certain elements that have been deprecated or changed between Compose file format 2.x/3.x and the Compose Specification. + +- [How Compose works](compose-application-model.md) +- [Compose Specification reference](/reference/compose-file/_index.md) diff --git a/content/manuals/compose/release-notes.md b/content/manuals/compose/release-notes.md new file mode 100644 index 00000000000..c6055dbd2f0 --- /dev/null +++ b/content/manuals/compose/release-notes.md @@ -0,0 +1,7 @@ +--- +title: Release notes +weight: 90 +params: + sidebar: + goto: "https://github.com/docker/compose/releases" +--- \ No newline at end of file diff --git a/content/manuals/compose/releases/_index.md b/content/manuals/compose/releases/_index.md deleted file mode 100644 index 1d44967fb9a..00000000000 --- a/content/manuals/compose/releases/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -build: - render: never -title: Releases -weight: 70 ---- \ No newline at end of file diff --git a/content/manuals/compose/releases/migrate.md b/content/manuals/compose/releases/migrate.md deleted file mode 100644 index 1fc0ef12669..00000000000 --- a/content/manuals/compose/releases/migrate.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: Migrate to Compose v2 -weight: 20 -description: How to migrate from Compose v1 to v2 -keywords: compose, upgrade, migration, v1, v2, docker compose vs docker-compose -aliases: -- /compose/compose-v2/ -- /compose/cli-command-compatibility/ -- /compose/migrate/ ---- - -From July 2023, Compose v1 stopped receiving updates. It’s also no longer available in new releases of Docker Desktop. - -Compose v2, which was first released in 2020, is included with all currently supported versions of Docker Desktop. It offers an improved CLI experience, improved build performance with BuildKit, and continued new-feature development. - -## How do I switch to Compose v2? - -The easiest and recommended way is to make sure you have the latest version of [Docker Desktop](/manuals/desktop/release-notes.md), which bundles the Docker Engine and Docker CLI platform including Compose v2. - -With Docker Desktop, Compose v2 is always accessible as `docker compose`. - -For manual installs on Linux, you can get Compose v2 by either: -- [Using Docker's repository](/manuals/compose/install/linux.md#install-using-the-repository) (recommended) -- [Downloading and installing manually](/manuals/compose/install/linux.md#install-the-plugin-manually) - -## What are the differences between Compose v1 and Compose v2? - -### `docker-compose` vs `docker compose` - -Unlike Compose v1, Compose v2 integrates into the Docker CLI platform and the recommended command-line syntax is `docker compose`. - -The Docker CLI platform provides a consistent and predictable set of options and flags, such as the `DOCKER_HOST` environment variable or the `--context` command-line flag. - -This change lets you use all of the shared flags on the root `docker` command.
-For example, `docker --log-level=debug --tls compose up` enables debug logging from the Docker Engine as well as ensuring that TLS is used for the connection. - -> [!TIP] -> -> Update scripts to use Compose v2 by replacing the hyphen (`-`) with a space, using `docker compose` instead of `docker-compose`. - -### Service container names - -Compose generates container names based on the project name, service name, and scale/replica count. - -In Compose v1, an underscore (`_`) was used as the word separator. -In Compose v2, a hyphen (`-`) is used as the word separator. - -Underscores aren't valid characters in DNS hostnames. -By using a hyphen instead, Compose v2 ensures service containers can be accessed over the network via consistent, predictable hostnames. - -For example, running the Compose command `-p myproject up --scale=1 svc` results in a container named `myproject_svc_1` with Compose v1 and a container named `myproject-svc-1` with Compose v2. - -> [!TIP] -> -> In Compose v2, the global `--compatibility` flag or `COMPOSE_COMPATIBILITY` environment variable preserves the Compose v1 behavior to use underscores (`_`) as the word separator. -As this option must be specified for every Compose v2 command run, it's recommended that you only use this as a temporary measure while transitioning to Compose v2. - -### Command-line flags and subcommands - -Compose v2 supports almost all Compose V1 flags and subcommands, so in most cases, it can be used as a drop-in replacement in scripts. - -#### Unsupported in v2 - -The following were deprecated in Compose v1 and aren't supported in Compose v2: -* `docker-compose scale`. Use `docker compose up --scale` instead. -* `docker-compose rm --all` - -#### Different in v2 - -The following behave differently between Compose v1 and v2: - -| | Compose v1 | Compose v2 | -|-------------------------|------------------------------------------------------------------|-------------------------------------------------------------------------------| -| `--compatibility` | Deprecated. Migrates YAML fields based on legacy schema version. | Uses `_` as word separator for container names instead of `-` to match v1. | -| `ps --filter KEY-VALUE` | Undocumented. Allows filtering by arbitrary service properties. | Only allows filtering by specific properties, e.g. `--filter=status=running`. | - -### Environment variables - -Environment variable behavior in Compose v1 wasn't formally documented and behaved inconsistently in some edge cases. - -For Compose v2, the [Environment variables](/manuals/compose/how-tos/environment-variables/_index.md) section covers both [precedence](/manuals/compose/how-tos/environment-variables/envvars-precedence.md) as well as [`.env` file interpolation](/manuals/compose/how-tos/environment-variables/variable-interpolation.md) and includes many examples covering tricky situations such as escaping nested quotes. - -Check if: -- Your project uses multiple levels of environment variable overrides, for example `.env` file and `--env` CLI flags. -- Any `.env` file values have escape sequences or nested quotes. -- Any `.env` file values contain literal `$` signs in them. This is common with PHP projects. -- Any variable values use advanced expansion syntax, for example `${VAR:?error}`. - -> [!TIP] -> -> Run `docker compose config` on the project to preview the configuration after Compose v2 has performed interpolation to -verify that values appear as expected. 
-> -> Maintaining backwards compatibility with Compose v1 is typically achievable by ensuring that literal values (no -interpolation) are single-quoted and values that should have interpolation applied are double-quoted. - -## What does this mean for my projects that use Compose v1? - -For most projects, switching to Compose v2 requires no changes to the Compose YAML or your development workflow. - -It's recommended that you adapt to the new preferred way of running Compose v2, which is to use `docker compose` instead of `docker-compose`. -This provides additional flexibility and removes the requirement for a `docker-compose` compatibility alias. - -However, Docker Desktop continues to support a `docker-compose` alias to redirect commands to `docker compose` for convenience and improved compatibility with third-party tools and scripts. - -## Is there anything else I need to know before I switch? - -### Migrating running projects - -In both v1 and v2, running up on a Compose project recreates service containers as needed. It compares the actual state in the Docker Engine to the resolved project configuration, which includes the Compose YAML, environment variables, and command-line flags. - -Because Compose v1 and v2 [name service containers differently](#service-container-names), running `up` using v2 the first time on a project with running services originally launched by v1, results in service containers being recreated with updated names. - -Note that even if `--compatibility` flag is used to preserve the v1 naming style, Compose still needs to recreate service containers originally launched by v1 the first time `up` is run by v2 to migrate the internal state. - -### Using Compose v2 with Docker-in-Docker - -Compose v2 is now included in the [Docker official image on Docker Hub](https://hub.docker.com/_/docker). - -Additionally, a new [docker/compose-bin image on Docker Hub](https://hub.docker.com/r/docker/compose-bin) packages the latest version of Compose v2 for use in multi-stage builds. - -## Can I still use Compose v1 if I want to? - -Yes. You can still download and install Compose v1 packages, but you won't get support from Docker if anything breaks. - ->[!WARNING] -> -> The final Compose v1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk. - -## Additional Resources - -- [docker-compose v1 on PyPI](https://pypi.org/project/docker-compose/1.29.2/) -- [docker/compose v1 on Docker Hub](https://hub.docker.com/r/docker/compose) -- [docker-compose v1 source on GitHub](https://github.com/docker/compose/releases/tag/1.29.2) diff --git a/content/manuals/compose/releases/release-notes.md b/content/manuals/compose/releases/release-notes.md deleted file mode 100644 index a37c97ff204..00000000000 --- a/content/manuals/compose/releases/release-notes.md +++ /dev/null @@ -1,4333 +0,0 @@ ---- -title: Docker Compose release notes -linkTitle: Release notes -weight: 10 -description: Learn about the new features, bug fixes, and breaking changes for the newest Docker Compose release -keywords: release notes, compose -tags: [Release notes] -toc_max: 2 -aliases: -- /release-notes/docker-compose/ -- /compose/release-notes/ ---- - -For more detailed information, see the [release notes in the Compose repo](https://github.com/docker/compose/releases/). 
- -## 2.36.0 - -{{< release-date date="2025-05-07" >}} - -### Bug fixes and enhancements - -- Introduced `networks.interface_name` -- Added support for `COMPOSE_PROGRESS` env variable -- Added `service.provider` to external binaries -- Introduced build `--check` flag -- Fixed multiple panic issues when parsing Compose files - -### Update - -- Dependencies upgrade: bump compose-go to v2.6.2 -- Dependencies upgrade: bump docker engine and cli to v28.1.0 -- Dependencies upgrade: bump containerd to 2.0.5 -- Dependencies upgrade: bump buildkit to v0.21.1 - -## 2.35.1 - -{{< release-date date="2025-04-17" >}} - -### Bug fixes and enhancements - -- Fixed an issue with bind mounts - -### Update - -- Dependencies upgrade: bump compose-go to v2.6.0 -- Dependencies upgrade: bump docker engine and cli to v28.0.4 -- Dependencies upgrade: bump buildx to v0.22.0 - -## 2.35.0 - -{{< release-date date="2025-04-10" >}} - -### Bug fixes and enhancements - -- Added support for [Docker Model Runner](/manuals/ai/model-runner.md) to easily integrate AI models into your Compose applications -- Added `build --print` command to help debug complex build configurations by showing the equivalent bake file -- Added `volume.type=image` to provide more flexible volume management for container images -- Added `--quiet` options to the `run` command for cleaner output when running containers -- Added `config --no-env-resolution` option to view raw configuration without environment variable substitution -- Fixed behavior of `depends_on` to prevent unnecessary container recreation when dependencies change -- Fixed support for secrets defined by environment variables when using `include` -- Fixed volume mount handling to ensure bind mounts work correctly in all scenarios - -### Update - -- Dependencies upgrade: bump docker engine and cli to v28.1.0 -- Dependencies upgrade: bump buildx to v0.23.0 -- Dependencies upgrade: bump buildkit to v0.21.0 - -## 2.34.0 - -{{< release-date date="2025-03-14" >}} - -### Bug fixes and enhancements - -- Added support of refresh `pull_policy` values `daily`, `weekly` and `every_` -- Introduced `include` attribut to `watch` definition to match file patterns -- Introduced `--env-from-file` in flag for the `docker compose run` command -- Promoted `publish` as a regular command of Compose -- Fixed a bug by loading `env_file` after services have been selected - -### Update - -- Dependencies upgrade: bump docker engine and cli to v28.0.1 -- Dependencies upgrade: bump buildkit to v0.17.1 -- Dependencies upgrade: Bump compose-go v2.4.9 -- Dependencies upgrade: Bump buildx v0.21.2 - -## 2.33.1 - -{{< release-date date="2025-02-21" >}} - -### Bug fixes and enhancements - -- Added support for `gw_priority`, `enable_ipv4` (requires Docker v28.0) -- Fixed an issue with the navigation menu -- Improved error message when using non-file secret/config with read-only service - -### Update - -- Dependencies upgrade: bump docker engine and cli to v28.0.0 - -## 2.33.0 - -{{< release-date date="2025-02-13" >}} - -### Bug fixes and enhancements - -- Introduced a hint to promote the use of [Bake](/build/bake/) -- Introduced support for the `additional_context` attribute referencing another service -- Added support for `BUILDKIT_PROGRESS` -- Compose now warns you when a published Compose application includes environment variables -- Added a `--with-env` flag to publish a Compose application with environment variables -- Updated `ls --quiet` help description -- Fixed multiple issues delegating build to Bake -- Updated 
help in `stats` command -- Fixed support for "builtin" seccomp profile -- Fixed support for `watch` with multiple services -- Removed exit code per error type used by legacy metrics system -- Fixed test coverage for `compatibility` -- Removed raw os.Args sent to OpenTelemetry -- Enabled copyloopvar linter -- Fixed provenance for binaries and generate SBOM -- Main branch for docs upstream validation is now used -- Added codeowners file -- Added Docker Engine v28.x to the test-matrix - -### Update - -- Dependencies upgrade: Bump compose-go v2.4.8 -- Dependencies upgrade: Bump buildx v0.20.1 -- Dependencies upgrade: Bump docker to v27.5.1 -- Dependencies upgrade: Bump golangci-lint to v1.63.4 -- Dependencies upgrade: Bump golang.org/x/sys from 0.28.0 to 0.30.0 -- Dependencies upgrade: Bump github.com/moby/term v0.5.2 -- Dependencies upgrade: Bump github.com/otiai10/copy from 1.14.0 to 1.14.1 -- Dependencies upgrade: Bump github.com/jonboulle/clockwork from 0.4.0 to 0.5.0 -- Dependencies upgrade: Bump github.com/spf13/pflag from 1.0.5 to 1.0.6 -- Dependencies upgrade: Bump golang.org/x/sync from 0.10.0 to 0.11.0 -- Dependencies upgrade: Bump gotest.tools/v3 from 3.5.1 to 3.5.2 - -## 2.32.4 - -{{< release-date date="2025-01-16" >}} - -### Bug fixes and enhancements - -- Fixed an issue where the Compose version did not display properly when using `docker compose version` - -## 2.32.3 - -{{< release-date date="2025-01-13" >}} - -> [!NOTE] -> -> Binaries from the Compose GitHub repository may not display the version number properly. If you rely on `docker compose version` -> in your development or CI processes, upgrade to Compose version 2.32.4. - -### Bug fixes and enhancements - -- Fixed an issue where Compose would override a service-level MAC address with the main network MAC address -- Fixed a log rendering issue during concurrent builds - -## 2.32.2 - -{{< release-date date="2025-01-07" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v2.4.7 -- Dependencies upgrade: bump golang to v1.22.10 - -### Bug fixes and enhancements - -- Added `--pull` flag to the `docker compose run` command -- Fixed a bug which meant the `restart` action of `watch` mode didn't monitor bind mounts -- Fixed an issue recreating containers when using anonymous volumes - -## 2.32.1 - -{{< release-date date="2024-12-16" >}} - -### Bug fixes and enhancements - -- Fixed a bug recreating containers when not needed - -## 2.32.0 - -{{< release-date date="2024-12-13" >}} - -### Update - -- Dependencies upgrade: bump docker + buildx to latest release -- Dependencies upgrade: bump otel dependencies to v1.28.0 and v0.53.0 -- Dependencies upgrade: bump golang.org/x/sys 0.28.0 -- Dependencies upgrade: bump golang.org/x/crypto to 0.31.0 -- Dependencies upgrade: bump google.golang.org/grpc to 1.68.1 -- Dependencies upgrade: bump golang.org/x/sync 0.10.0 -- Dependencies upgrade: bump xx to v1.6.1 - -### Bug fixes and enhancements - -- Improved support when building with [Bake](/manuals/build/bake.md) -- Added `restart` and `sync+exec` watch actions -- Compose now recreates containers when the volume or network configuration changes -- Fixed support for `mac_address` -- Fixed `pull --quiet` to only hide progress, not global status -- Fixed an issue where only the `rebuild` watch action now requires a build declaration -- Compose now logs `watch` configuration error when enabled through the Compose menu - - -## 2.31.0 - -{{< release-date date="2024-11-28" >}} - -### Update - -- Dependencies upgrade: bump compose-go to 
v2.4.5 -- Dependencies upgrade: bump docker engine and cli to v27.4.0-rc.2 -- Dependencies upgrade: bump buildx to v0.18.0 -- Dependencies upgrade: bump buildkit to v0.17.1 - -### Bug fixes and enhancements - -- Added the ability to use Docker Buildx Bake to build Docker Compose services -- Added `commit` command to create new images from running containers -- Fixed an issue where network changes were not detected -- Fixed an issue where containers stopped sequentially which slowed down the restart process - - -## 2.30.3 - -{{< release-date date="2024-11-07" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v2.4.4 - -### Bug fixes and enhancements - -- Fixed an issue re-starting services that should not when using `--watch` -- Improve the fix of using same YAML anchor multiple times in a Compose file - - -## 2.30.2 - -{{< release-date date="2024-11-05" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v2.4.3 - -### Bug fixes and enhancements - -- Fixed an issue re-creating services when updating its profiles -- Fixed a regression when using the same YAML anchor multiple times in a Compose file - -## 2.30.1 - -{{< release-date date="2024-10-30" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v2.4.2 - -### Bug fixes and enhancements - -- Fixed a regression when using stdin as input for `-f` flag -- Fixed a regression when using the same YAML anchor multiple times in a Compose file - -## 2.30.0 - -{{< release-date date="2024-10-29" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v2.4.1 -- Dependencies upgrade: bump docker engine and cli to v27.3.1 - -### Bug fixes and enhancements - -- Introduction of service hooks support. -- Addition of alpha `generate` command. -- Addition of `export` command. -- Added support for CDI device requests using `devices` in the Compose file. -- A lot a bug fixes. - -## 2.29.7 - -{{< release-date date="2024-09-20" >}} - - -### Bug fixes and enhancements - -- Fixed a regression when using mount API for bind mounts. - -## 2.29.6 - -{{< release-date date="2024-09-19" >}} - -### Update - -- Dependencies upgrade: bump docker engine and cli to v27.3.0-rc.2 - -### Bug fixes and enhancements - -- Fixed an issue with Windows Containers bind mounts. - -## 2.29.5 - -{{< release-date date="2024-09-17" >}} - -### Bug fixes and enhancements - -- Fixed an issue with bind mounts on WSL2. - -## 2.29.4 - -{{< release-date date="2024-09-16" >}} - -### Update - -- Dependencies upgrade: bump buildx to v0.17.1 -- Dependencies upgrade: bump docker engine and cli to v27.3.0-rc.1 - -### Bug fixes and enhancements - -- Fixed an issue with services not stopping when restarting diverged dependencies. -- Fixed potential `nil` pointer error on the OTEL client. - -## 2.29.3 - -{{< release-date date="2024-09-12" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v2.2.0 -- Dependencies upgrade: bump docker engine and cli to v27.2.1 - -### Bug fixes and enhancements - -- Combination of bind mount and `rebuild` are now allowed with `watch`. -- Fixed a bug recreating containers when `--no-deps` is used with `up`. -- Fixed a bug not closing streams when reattaching containers. -- Restored recreation of anonymous volumes when using `-V` or `--renew-anon-volumes`. 
- -## 2.29.2 - -{{< release-date date="2024-08-16" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v2.1.6 -- Dependencies upgrade: bump docker engine and cli to v27.1.2 -- Dependencies upgrade: bump buildx to v0.16.2 -- Dependencies upgrade: bump buildkit to v0.15.2 -- Dependencies upgrade: bump golang to v1.21.12 -- Dependencies upgrade: bump sys to v0.22.0 -- Dependencies upgrade: bump flock to v0.12.1 - -### Bug fixes and enhancements - -- Fixed the docs on `docker compose kill` usage. -- Fixed redundant condition from `toAPIBuildOptions` in build.go. -- Fixed initial Watch `sync` after Compose restarts with introduction of `x-initialSync`. -- Fixed an issue which stopped the Compose process for a single container on `sync-restart` Watch action. - -## 2.29.1 - -{{< release-date date="2024-07-23" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v2.1.5. -- Dependencies upgrade: bump docker engine and cli to v27.1.0. - -### Bug fixes and enhancements - -- Enhance JSON progress events with more fields. - - -## 2.29.0 - -{{< release-date date="2024-07-17" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v2.1.4 -- Dependencies upgrade: bump docker engine and cli to v27.0.3 -- Dependencies upgrade: bump buildx to 0.16.0 -- Dependencies upgrade: bump buildkit to 0.15.0 -- Dependencies upgrade: bump containerd to 1.7.19 - -### Bug fixes and enhancements - -- Added a JSON stream progress writer. -- Added a `--prune` flag to the `docker compose watch` command. -- Unnecessary resources are now excluded after services have been selected. -- Empty variables with no value are unset in containers. - -## 2.28.1 - -{{< release-date date="2024-06-24" >}} - -### Bug fixes and enhancements - -- Fixed progress display, broken in `v2.28.0`, when TTY mode available. 
- -## 2.28.0 - -{{< release-date date="2024-06-21" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v2.1.3 -- Dependencies upgrade: bump docker engine and cli to v27.0.1-rc.1 - -## 2.27.3 - -{{< release-date date="2024-06-21" >}} - -### Update - -- Dependencies upgrade: bump buildx to 0.15.1 -- Dependencies upgrade: bump buildkit to 0.14.1 - -## 2.27.2 - -{{< release-date date="2024-06-20" >}} - -### Update - -- Dependencies upgrade: bump golang to 1.21.11 -- Dependencies upgrade: bump docker engine and cli to v26.1.4 -- Dependencies upgrade: bump buildx to 0.15.0 -- Dependencies upgrade: bump buildkit to 0.14.0 -- Dependencies upgrade: bump containerd to 1.7.18 - -### Bug fixes and enhancements - -- Added an `--environment` flag to the `config` command -- Fixed a bug which caused the `watch` process to hang when used as flag with the `up` command -- Fixed usage of `COMPOSE_PROFILES` in `.env` file - -## 2.27.1 - -{{< release-date date="2024-05-24" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v2.1.1 -- Dependencies upgrade: bump docker engine and cli to v26.1.3 -- Dependencies upgrade: bump buildx to 0.14.1 -- Dependencies upgrade: bump containerd to 1.7.17 - -### Bug fixes and enhancements - -- Added a navigation menu in the CLI where you can open your Compose file in Docker Desktop -- Added documentation for `--menu` flag in `docker compose up` -- Fixed a bug with `--resolve-image-digests` used with `--no-interpolate` -- You can now use a local `.env` file to override `COMPOSE_*` environment variables - -## 2.27.0 - -{{< release-date date="2024-04-24" >}} - -### Update - -- Dependencies upgrade: bump golang to 1.21.9 -- Dependencies upgrade: bump compose-go to v2.1.0 -- Dependencies upgrade: bump docker engine and cli to v26.1.0 - -### Bug fixes and enhancements - -- Introduced `--abort-on-container-failure` flag -- Introduced `--all-resources` to not exclude resources not used by services -- Introduced support for `build.entitlements` -- Fixed a bug so Docker Compose now ignores missing containers when `docker compose down/stop -p` is run -- Fixed support for `--flag=value` syntax in compatibility mode - -## 2.26.1 - -{{< release-date date="2024-03-29" >}} - -### Update - -- Dependencies upgrade: opencontainers/image-spec v1.1.0 - -### Bug fixes and enhancements - -- Added image pull failure reason in output -- Fixed crash when running up with `--no-build` and `--watch` -- Fixed crash when no TTY available and menu enabled -- Improved legibility of menu actions - -## 2.26.0 - -{{< release-date date="2024-03-22" >}} - -### Update - -- Dependencies upgrade: bump compose-go v2.0.2 -- Dependencies upgrade: bump docker v26.0.0 - -### Bug fixes and enhancements - -- Reduced timeout of the Otel tracing command -- Fixed `config --format json` -- Fixed documentation on default build image name -- Introduced Synchronized file shares for bind mounts in Compose -- Added support for `annotations` -- Introduced `config --variables` to list Compose model variables -- Added a navigation menu within `docker compose up` - -## 2.25.0 - -{{< release-date date="2024-03-15" >}} - -### Update - -- Dependencies upgrade: bump compose-go v2.0.0 - -### Bug fixes and enhancements - -- Restored `config` behaviour until `--no-interpolate` is set -- Fixed service name shell completion -- Added `--watch` flag to `up` command - -## 2.24.7 - -{{< release-date date="2024-03-06" >}} - -### Update - -- Dependencies upgrade: bump golang to 1.21.8 -- Dependencies upgrade: bump compose-go to 
2.0.0-rc8 -- Dependencies upgrade: bump docker to v24.0.4 - -### Bug fixes and enhancements - -- Compose now ensures stable priority sort order for networks -- Fixed interpolation with curly braces (e.g. JSON) in default values -- Fixed validation for non-unique `container_name` values -- Fixed validation for `develop.watch` -- Fixed environment loading for `include` -- Fixed panic when merging labels/networks -- Added support for `--no-path-resolution` when using `include` -- Fixed missing project name errors -- Fixed `--no-interpolate` flag on `config` -- Added a workaround for file lock issues with Watch mode on Windows -- Fixed duplicate exit code status messages -- Compose now respects `COMPOSE_REMOVE_ORPHANS` on `up` - -## 2.24.6 - -{{< release-date date="2024-02-15" >}} - -### Update - -- Dependencies upgrade: bump cli to 25.0.3 -- Dependencies upgrade: bump compose-go to 2.0.0-rc.7 - -### Bug fixes and enhancements - -- Fixed issue of `.env` file loading when project file is set via `COMPOSE_FILE` variable -- Aligned `ps --status=exited` behaviour with the Docker CLI behaviour -- Fixed a deadlock when collecting large logs - -## 2.24.5 - -{{< release-date date="2024-01-30" >}} - -### Bug fixes and enhancements - -- Fixed "failed to solve: changes out of order" errors when building images on Windows. - -## 2.24.4 - -{{< release-date date="2024-01-29" >}} - -### Update - -- Dependencies upgrade: bump cli to 25.0.1 -- Dependencies upgrade: bump docker to 25.0.1 -- Dependencies upgrade: bump compose-go to 2.0.0-rc.3 - -### Bug fixes and enhancements - -- Fixed issue when checking external network existence when swarm is enabled. -- Added support for `storage_opt` attribute. - -## 2.24.3 - -{{< release-date date="2024-01-24" >}} - -This release fixes a build issue with Docker Desktop for Windows introduced in Compose v2.24.0. - -### Update - -- Compose now uses a custom version of `fsutils` library. - -## 2.24.2 - -{{< release-date date="2024-01-22" >}} - -### Update - -- Dependencies upgrade: bump cli to 25.0.0 GA -- Dependencies upgrade: bump compose-go to 2.0.0-rc.2 - -## 2.24.1 - -{{< release-date date="2024-01-18" >}} - -### Update - -- Dependencies upgrade: bump cli to 25.0.0-rc3 -- Dependencies upgrade: bump docker to 25.0.0-rc3 -- Dependencies upgrade: bump compose-go to 2.0.0-rc.1 -- Dependencies upgrade: bump containerd to 1.7.12 - -### Bug fixes and enhancements - -- Reworked the display of container status during `up` -- Fixed the engine version required to use `healthcheck.start_interval` -- Removed `watch` subcommand from the `alpha` command -- Fixed a bug when handling received signals - -## 2.24.0 - -{{< release-date date="2024-01-11" >}} - -### Update - -- Dependencies upgrade: bump cli to 25.0.0-beta.3 -- Dependencies upgrade: bump compose-go to 2.0.0-beta.3 -- Dependencies upgrade: bump golang to 1.21.6 - -### Bug fixes and enhancements - -- Introduced `docker compose attach` to attach local standard input, output, and error streams to a service's running container. -- Introduced `docker compose stats` to display a live stream of container(s) resource usage statistics. -- Introduced `docker compose ps --orphans` to include/exclude services not declared. -- Introduced `docker compose logs --index` to select a replica container. -- Introduced `docker compose build --with-dependencies` to also build dependencies. -- Added source policies for build. -- Included disabled services for shell completion. -- Restored `Project` in ps JSON output. 
-- Added OCI 1.0 fallback support for AWS ECR. -- Build now does not require environment to be resolved. -- Compose now sends out a cancel event on SIGINT/SIGTERM signal for `compose up`. -- Fixed log by exposing services ports when `--verbose`. -- Fixed inlined and environment-defined configs to be mounted under /\ until an explicit target is set. -- Fixed combination of `--pull always --no-build`. -- Fixed race condition in log printer. -- Fixed `docker compose up` teardown when command context is cancelled. - -## 2.23.3 - -{{< release-date date="2023-11-22" >}} - -### Update - -- Dependencies upgrade: bump buildx to v0.12.0 - -## 2.23.2 - -{{< release-date date="2023-11-21" >}} - -### Update - -- Dependencies upgrade: bump buildkit 0.12.3 -- Dependencies upgrade: bump docker 24.0.7 -- Dependencies upgrade: bump cli 24.0.7 -- Dependencies upgrade: bump 1.20.2 - -### Bug fixes and enhancements - -- Compose now supports `builds.tags` with `push` command. -- Compose Watch now re-builds service images at startup. -- Now `--remove-orphans` doesn't manage disabled services as orphaned. -- Compose displays `Building` output log only if there is at least one service to build. - -## 2.23.1 - -{{< release-date date="2023-11-16" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v1.20.1 - -### Bug fixes and enhancements - -- Aligned Compose with OCI artifact best practices. -- Introduced `--resolve-image-digests` so users can seal service images by digest when publishing a Compose application. -- Improved Compose Watch configuration logging. -- Compose now rejects a Compose file using `secrets|configs.driver` or `template_driver`. -- Compose now fails to start if a dependency is missing. -- Fixed SIGTERM support to stop/kill stack. -- Fixed a `--hash` regression. -- Fixed "Application failed to start after update" when an external network is on a watched service. -- Fixed `--pull` documentation. -- Fixed display by adding newline in cmd/compose/build.go. -- Compose is rendered quiet after filtering applied. -- Stripped project prefix from docker-compose up output. - -## 2.23.0 - -{{< release-date date="2023-10-18" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v1.20.0 -- Dependencies upgrade: bump containerd to 1.7.7 - -### Bug fixes and enhancements - -- Added dry-run support for publish command -- Added `COMPOSE_ENV_FILES` env variable to pass a list of env files -- Added `sync+restart` action to `compose watch` -- Aligned `compose ps` output with Docker CLI by default and introduced `--no-trunc` to keep the previous behaviour -- Fixed hashes inconsistency between `up` and `configure` -- Enabled profiles when `down` ran with explicit service names -- Fixed an issue when the pull policy provided was invalid - -## 2.22.0 - -{{< release-date date="2023-09-21" >}} - -> [!NOTE] -> -> The `watch` command is now generally available (GA). You can directly use it from the root command `docker compose watch`. -> For more information, see [File watch](/manuals/compose/how-tos/file-watch.md). - -### Update - -- Dependencies upgrade: bump golang to 1.21.1 -- Dependencies upgrade: bump compose-go to v1.19.0 -- Dependencies upgrade: bump buildkit to v0.12.2 - -### Bug fixes and enhancements - -- Added experimental support for the `publish` command. -- The command `watch` now builds and launches the project during startup. -- Added `policy` option to the `--pull` flag. -- Fixed various race and deadlock conditions for `up` command on exit. -- Fixed multi-platform issues on build. 
-- Enabled services that are explicitly requested even when their `profiles` aren't activated. -- Fixed a `config` issue when the declared `env_file` is missing. -- Passed BuildOptions to `up` and `run` commands. - -## 2.21.0 - -{{< release-date date="2023-08-30" >}} - -> [!NOTE] -> -> The format of `docker compose ps` and `docker compose ps --format=json` changed to better align with `docker ps` output. See [compose#10918](https://github.com/docker/compose/pull/10918). - -### Update - -- Dependencies upgrade: bump compose-go to v1.18.3 - -### Bug fixes and enhancements - -- Changed `docker compose ps` and `docker compose ps --format=json` output to align with Docker CLI. -- Added support for multi-document YAML files. -- Added support for loading remote Compose files from Git repos with `include` (experimental). -- Fixed incorrect proxy variables during build. -- Fixed truncated container logs on container exit. -- Fixed "no such service" errors when using `include` with `--profile`. -- Fixed `.env` overrides when using `include`. - -## 2.20.3 - -{{< release-date date="2023-08-11" >}} - -### Update - -- Dependencies upgrade: bump golang to 1.21.0 -- Dependencies upgrade: bump compose-go to v1.18.1 -- Dependencies upgrade: bump buildkit to v0.12.1 - -### Bug fixes and enhancements - -- Improved speed and reliability of `watch` sync. -- Added builder's name on the first build line. -- Improved shell completion for `--project-directory` and `--profile`. -- Fixed build issue with proxy configuration not passing to legacy builder. -- Removed unnecessary warning when an option dependency exists successfully. - -## 2.20.2 - -{{< release-date date="2023-07-19" >}} - -### Bug fixes and enhancements - -- Added support for the `depends_on.required` attribute. -- Fixed an issue where build tries to push unnamed service images. -- Fixed a bug which meant the target secret path on Windows was not checked. -- Fixed a bug resolving build context path for services using `extends.file`. - -## 2.20.1 - -{{< release-date date="2023-07-18" >}} - -### Update - -- Dependencies upgrade: bump golang to 1.20.6 -- Dependencies upgrade: bump buildx to v0.11.2 -- Dependencies upgrade: bump buildkit to v0.12 -- Dependencies upgrade: bump docker-cli to v24.0.5-dev - -## 2.20.0 - -{{< release-date date="2023-07-11" >}} - -### Update - -- Dependencies upgrade: bump docker/cli-docs-tools to v0.6.0 -- Dependencies upgrade: bump docker to v24.0.4 -- Dependencies upgrade: bump buildx to v0.11.1 - -### Bug fixes and enhancements - -* Introduced the `wait` command. -* Added support of `--builder` and `BUILDX_BUILDER` to the `build` command. -* Added support for the `include` and `attach` attributes from the Compose Specification. -* Fixed a DryRun mode issue when initializing CLI client. -* Fixed a bug with random missing network when a service has more than one. -* Fixed the Secrets file permission value to comply with the Compose Specification. -* Fixed an issue about `no-deps` flag not being applied. -* Fixed some source code comments. -* Fixed a bug when `--index` is not set select. -* Fixed a process leak in the wait e2e test. -* Improved some test speeds. - -## 2.19.1 - -{{< release-date date="2023-06-29" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v1.15.1 - -### Bug fixes and enhancements - -- Fixed sporadic "container not connected to network" errors on `compose up`. -- Fixed "please specify build context" errors on `compose build`. 
-- Compose now warns if using a bind mount in a service `watch` configuration.
-
-## 2.19.0
-
-{{< release-date date="2023-06-21" >}}
-
-### Update
-
-- Dependencies upgrade: bump compose-go to v1.15.0
-- Dependencies upgrade: bump buildx to v0.11.0
-- Dependencies upgrade: bump docker to v24.0.2
-- Dependencies upgrade: bump golang to 1.20.5
-
-### Bug fixes and enhancements
-
-- Introduced the ability to select a single service to be stopped by `compose down`.
-- Added `--progress` as a top-level flag to configure the progress UI style.
-- Introduced `run --cap-add` to run maintenance commands using the service image.
-- Fixed a bug during detection of swarm mode.
-- Fixed a bug when setting the project name via the `COMPOSE_PROJECT_NAME` environment variable.
-- Adjusted the display of the volumes flag in the `down` command help.
-- Fixed a bug so the `up` command no longer silently ignores missing `depends_on` services.
-- Aligned signal forwarding to containers with the `docker run` behaviour.
-- Compose now detects network name conflicts.
-- Fixed a typo in the warning message about an existing volume.
-- Compose now detects new services started after a `compose -p x logs -f` command.
-- Fixed a bug when `compose` was used as the project name.
-- Fixed a bug in the `watch` command when a directory does not exist.
-- Removed the default timeout of 10 seconds when restarting or stopping services.
-- Fixed a bug in `watch` which applied the "rebuild" strategy by default.
-- Fixed a race condition when waiting for containers as one exits.
-- Added a warning telling users that `uid`, `gid`, and `mode` are not implemented for `build.secrets`.
-- Fixed a bug in `watch` which was watching the whole build context instead of only the configured paths.
-- Compose now sorts containers by creation date to scale down the older ones first.
-- Fixed a bug in the docs generation task for Windows environments.
-- Updated the docs to reflect that Dry Run mode is feature complete.
-- Improved the diagnostic message on network label mismatch.
-- Fixed a bug which rendered the `Building` section when there was no build involved.
-- Fixed a bug in code coverage metrics.
-- Added OTEL initialization.
-- Added a GitHub Action to trigger Docker Desktop e2e tests with Compose edge versions.
-- Added more ignore rules to Dependabot.
- -## 2.18.1 - -{{< release-date date="2023-05-17" >}} - -### Bug fixes and enhancements - -- Fixed "Image not found" errors when building images - -## 2.18.0 - -{{< release-date date="2023-05-16" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v1.13.5 -- Dependencies upgrade: bump buildkit to v0.11.6 -- Dependencies upgrade: bump docker to v23.0.5 - -### Bug fixes and enhancements - -- Added dry run support using `--dry-run` -- Added the first (alpha) implementation of the `viz` sub-command -- Introduced `--no-path-resolution` to skip relative path to be resolved -- Introduced `COMPOSE_ANSI` to define the `--ansi` default value -- Introduced `COMPOSE_STATUS_STDOUT` to get status messages sent to stdout -- Fixed the BuildKit progressui integration -- Fixed a bug to stop blocking the events loop collecting logs -- Restored support for `--memory` -- Fixed a bug which meant containers didn't stop after termination -- Compose now lets users declare the build secret target -- Fixed a bug which caused a container to be recreated when the config has not changed -- Fixed a race condition when `--parallel` is used with a large number of dependent services -- Compose now checks the local image matches the required platform -- Fixed local image removal when `compose down` is ran with `--project-name` -- Compose now detects the active endpoint trying to remove the network and skips with a warning -- Removed unnecessary [] output -- Compose detects that a Windows terminal is not a `console.File` to avoid a panic -- `--parallel` now has precedence over `COMPOSE_PARALLEL_LIMIT` -- Compose now reports that the external network is not found when Swarm is disabled - -## 2.17.2 - -{{< release-date date="2023-03-26" >}} - -### Update - -- Dependencies upgrade: bump compose-go to v1.13.2 - -### Bug fixes and enhancements - -- Fixed invalid project name error for directories with uppercase characters or `.` in the name. Fixed [compose#10405](https://github.com/docker/compose/issues/10405) - -## 2.17.1 - -{{< release-date date="2023-03-24" >}} - -### Update - -- Dependencies upgrade: bump buildkit to v0.11.5 -- Dependencies upgrade: bump compose-go to v1.13.1 -- Dependencies upgrade: bump golang to 1.20.2 - -### Bug fixes and enhancements - -- Fixed panic on `alpha watch` command. Pull Request [compose#10393](https://github.com/docker/compose/pull/10393) -- Prevented conflicts for services named `extensions`. Fixed [compose-go#247](https://github.com/compose-spec/compose-go/issues/247) -- Compose now validates project names more consistently. Fixed [compose-go#363](https://github.com/compose-spec/compose-go/issues/363) - -## 2.17.0 - -{{< release-date date="2023-03-23" >}} - -### Upgrade notes - -- Project name validation is more strictly enforced. Project names can only include letters, numbers, `_`, `-` and must be lowercase and start with a letter or number. -- Boolean fields in YAML must be either `true` or `false`. Deprecated YAML 1.1 values such as "on" or "no" are not supported. -- Duplicate YAML merge keys (`<<`) are rejected. - -### Update - -- Dependencies upgrade: bump buildkit to v0.11.4 -- Dependencies upgrade: bump buildx to v0.10.4 -- Dependencies upgrade: bump containerd to 1.6.18 -- Dependencies upgrade: bump compose-go to v1.13.0 - -### Bug fixes and enhancements - -* Introduced `--wait-timeout` on `up` command. Fixed [compose#10269](https://github.com/docker/compose/issues/10269) -* Made `compose service --hash` output sort by service name. 
Pull Request [compose#10278](https://github.com/docker/compose/pull/10278) -* Compose now renders a compact TUI progress report to monitor layers download. Pull Request [compose#10281](https://github.com/docker/compose/pull/10281) -* Introduced `restart` for `depends_on`. Fixed [compose#10284](https://github.com/docker/compose/issues/10284) -* Added support of `NO_COLOR` env var. Fixed [compose#10340](https://github.com/docker/compose/issues/10340) -* Progress writer now uses `dockercli.Err` stream. Fixed [compose#10366](https://github.com/docker/compose/issues/10366) -* Added support for `additional_contexts` in the `build` service configuration. Fixed [compose#9461](https://github.com/docker/compose/issues/9461) [compose#9961](https://github.com/docker/compose/issues/9961) -* Added file delete/rename handling in `watch` mode. Pull Request [compose#10386](https://github.com/docker/compose/pull/10386) -* Introduced an `ignore` attribute in `watch` mode. Pull Request [compose#10385](https://github.com/docker/compose/pull/10385) -* Compose now uses progress writer to show copies status. Pull Request [compose#10387](https://github.com/docker/compose/pull/10387) -* Updated reference documentation for `-p`/`--project-name` flag. Fixed [docs#16915](https://github.com/docker/docs/pull/16915), [compose-spec#311](https://github.com/compose-spec/compose-spec/issues/311) -* Introduced a `replace` label to track the relationship between old and new containers of a service. Fixed [compose#9600](https://github.com/docker/compose/issues/9600) -* Fixed a bug that meant dependent services were not restarted after a service was restarted. Fixed [compose#10263](https://github.com/docker/compose/issues/10263) -* Compose now ignores services without a build section in `watch` mode. Fixed [compose#10270](https://github.com/docker/compose/issues/10270) -* Compose now applies config options for pseudo-subcommands. Fixed [compose#10286](https://github.com/docker/compose/issues/10286) -* Compose manages only containers with config_hash labels (i.e, created by compose). Fixed [compose#10317](https://github.com/docker/compose/issues/10317) -* Compose triggers an error if the project name is empty after normalization. Fixed [compose#10313](https://github.com/docker/compose/issues/10313) -* Compose restarts only needed services by checking `depends_on` relations. Fixed [compose#10337](https://github.com/docker/compose/issues/10337) -* Fixed a display issue on small terminals. Fixed [compose#10322](https://github.com/docker/compose/issues/10322) -* Fixed an issue with building the built images IDs collection. Pull Request [compose#10372](https://github.com/docker/compose/issues/10372) -* Use configured name separator to define oneoff container name. Fixed [compose#10354](https://github.com/docker/compose/issues/10354) -* Fixed concurrent map read/write issue when recreating containers. Fixed [compose#10319](https://github.com/docker/compose/issues/10319) -* Compose now supports Dry Run mode for `stop` and `rm` commands. Pull Request [compose#10257](https://github.com/docker/compose/issues/10257) -* Compose now supports Dry Run mode for `pull` command. Pull Request [compose#10341](https://github.com/docker/compose/issues/10341) -* Compose now supports Dry Run mode for `push` command. Pull Request [compose#10355](https://github.com/docker/compose/issues/10355) -* Compose now supports Dry Run mode for `exec` command. 
Pull Request [compose#10252](https://github.com/docker/compose/issues/10252) -* Compose now supports Dry Run mode for `restart` command. Pull Request [compose#10339](https://github.com/docker/compose/issues/10339) - -## 2.16.0 - -{{< release-date date="2023-02-08" >}} - -### Update - -- Dependencies upgrade: bump docker to v23.0.0 -- Dependencies upgrade: bump docker-cli to v23.0.0 -- Dependencies upgrade: bump buildkit to v0.11.2 -- Dependencies upgrade: bump buildx to v0.10.2 -- Dependencies upgrade: bump containerd to 1.6.16 -- Dependencies upgrade: bump golang to 1.20 - -### Bug fixes and enhancements - -* Introduced `--remove-orphans` for the `compose create` command. Fixed [compose#9718](https://github.com/docker/compose/issues/9718) -* Shortened the TTY output when the terminal is too small. Fixed [compose#9962](https://github.com/docker/compose/issues/9962) -* Added `remove-orphans` functionality to run. Fixed [compose#9718](https://github.com/docker/compose/issues/9718#issuecomment-1209448445) -* Introduced the experimental `watch` command. Pull Request [compose#10163](https://github.com/docker/compose/pull/10163) -* Compose now allows TTY to be allocated with `-t`. Fixed [compose#10161](https://github.com/docker/compose/issues/10161) -* Introduced the experimental `dry-run` command. Pull Request [compose#10173](https://github.com/docker/compose/issues/10173) -* Updated the documentation to explain ways to configure parallelism. Pull Request [compose#10198](https://github.com/docker/compose/issues/10198) -* Aligned the `logs` command with docker CLI by aliasing `-n` for `--tail`. Fixed [compose#10199](https://github.com/docker/compose/issues/10199) -* Added support for `docker compose build --push`. Pull Request [compose#10148](https://github.com/docker/compose/issues/10148) -* Added `--scale` to the `compose create` command. Fixed [compose#10208](https://github.com/docker/compose/issues/10208) -* Renamed `convert` to `config` to align with the Compose V1 UX. Pull Request [compose#10214](https://github.com/docker/compose/issues/10214) -* Compose now passes the proxy config as build args. Fixed [compose#8797](https://github.com/docker/compose/issues/8797) -* Fixed parsing issue in `compose up` by ignoring containers not created by Compose. Fixed [compose#10162](https://github.com/docker/compose/issues/10162#issuecomment-1384989985) -* Fixed the goroutine leak in log formatter initialization. Fixed [compose#10157](https://github.com/docker/compose/issues/10157) -* Fixed an issue where compose logs don't exit when all running containers have been stopped. Pull Request [compose#10181](https://github.com/docker/compose/issues/10181) -* Fixed the documentation to reflect `docker compose ps` being aligned with `docker ps`. Pull Request [compose#10195](https://github.com/docker/compose/issues/10195) -* Fixed an issue where the remote Buildx driver was not found. Fixed [compose#9893](https://github.com/docker/compose/issues/9893) -* Improved logging when recreating a service container. Pull request [compose#10236](https://github.com/docker/compose/issues/10236) -* Fixed an issue so Compose now only waits for containers concerned by the wait condition. Fixed [compose#10200](https://github.com/docker/compose/issues/10200) -* Compose now prevents assignment to entry in nil map. Fixed [compose#10244](https://github.com/docker/compose/issues/10244) -* Added a dedicated GitHub Action workflow for Cucumber tests. 
Pull Request [compose#10165](https://github.com/docker/compose/issues/10165)
-* Cleaned the TUI lines when switching in compact log mode. Fixed [compose#10201](https://github.com/docker/compose/issues/10201)
-* Added Tilt watcher to detect code changes in watch mode. Pull Request [compose#10218](https://github.com/docker/compose/issues/10218)
-* Compose now supports Dry Run mode for `kill` command. Fixed [compose#10210](https://github.com/docker/compose/issues/10210)
-* Compose now supports Dry Run mode for `pause` command. Fixed [compose#10217](https://github.com/docker/compose/issues/10217)
-* Compose now supports Dry Run mode for `cp` command. Fixed [compose#10235](https://github.com/docker/compose/issues/10235)
-
-## 2.15.1
-
-{{< release-date date="2023-01-09" >}}
-
-### Update
-
-- Dependencies upgrade to fix Golang CVE-2022-27664 and CVE-2022-32149
-
-### Bug fixes and enhancements
-
-* Added support for UTS namespace. Fixed [compose#8408](https://github.com/docker/compose/issues/8408)
-* Fixed filtering issue when no filter is set. Fixed [roadmap#418](https://github.com/docker/roadmap/issues/418)
-* Fixed concurrent map writes issue during build step. Pull Request [compose#10151](https://github.com/docker/compose/pull/10151)
-* Fixed issue when stdin is not a terminal. Fixed [compose#9739](https://github.com/docker/compose/issues/9739)
-
-## 2.15.0
-
-{{< release-date date="2023-01-05" >}}
-
-### Update
-
-- Dependencies upgrade: bump compose-go to v1.8.1
-- Dependencies upgrade: bump cli-docs-tool to 0.5.1
-
-### Bug fixes and enhancements
-
-* Added support for the `privileged` attribute in the `service.build` section. Pull Request [compose#10112](https://github.com/docker/compose/pull/10112)
-* Introduced `--ignore-buildable` to ignore buildable images on pull. Fixed [compose#8805](https://github.com/docker/compose/issues/8805)
-* Introduced `--no-attach` to ignore some service outputs. Fixed [compose#8546](https://github.com/docker/compose/issues/8546)
-* Fixed issue with `logs` when `driver:none` is set. Fixed [compose#9030](https://github.com/docker/compose/issues/9030)
-* Compose now relies on dockerCLI.streams. Pull Request [compose#10082](https://github.com/docker/compose/pull/10082)
-* Fixed issue with service hash that MUST exclude replicas. Fixed [compose#10077](https://github.com/docker/compose/issues/10077)
-* Compose now checks service names based on the project, not running containers. Fixed [compose#9951](https://github.com/docker/compose/issues/9951)
-* Fixed security opts support (seccomp and unconfined). Fixed [compose#9505](https://github.com/docker/compose/issues/9505)
-* Fixed empty file when using compose config in case of smaller source files. Fixed [compose#10121](https://github.com/docker/compose/issues/10121)
-* Fixed issue with `--pull` not applied on `compose up`. Fixed [compose#10125](https://github.com/docker/compose/issues/10125)
-* Compose should ignore not only auto-removed containers but also "removal in progress" for orphan containers. Pull Request [compose#10136](https://github.com/docker/compose/pull/10136)
-* Compose limits build concurrency according to `--parallel`. Fixed [compose#9091](https://github.com/docker/compose/issues/9091)
-
-## 2.14.2
-
-{{< release-date date="2022-12-20" >}}
-
-### Update
-
-- Dependencies upgrade: bump containerd to 1.6.14
-
-### Bug fixes and enhancements
-
-* Compose now uses DOCKER_DEFAULT_PLATFORM to determine the platform when creating a container.
Fixed [compose#10041](https://github.com/docker/compose/pull/10041) -* Compose now detects when dependency failed to start. Fixed [compose#9732](https://github.com/docker/compose/pull/9732) -* Fixed WCOW volume mounts. Fixed [compose#9577](https://github.com/docker/compose/pull/9577) -* List only running containers when using `--all=false`. Fixed [compose#10085](https://github.com/docker/compose/pull/10085) -* Fixed a regression when running pull `--ignore-pull-failures`. Fixed [compose#10089](https://github.com/docker/compose/pull/10089) -* Fixed CPU quota issue. Fixed [compose#10073](https://github.com/docker/compose/pull/10073) -* Fixed race condition on compose logs. Fixed [compose#8880](https://github.com/docker/compose/pull/8880) -* Updated projectOptions to be public by renaming it to ProjectOptions. Fixed [compose#100102](https://github.com/docker/compose/pull/100102) - -## 2.14.1 - -{{< release-date date="2022-12-15" >}} - -### Updates - -- Dependencies upgrade: bump Go to 1.19.4 -- Dependencies upgrade: bump containerd to 1.6.12 - -### Bug fixes and enhancements - -- Added `--parallel` to limit concurrent engine calls. Pull Request [compose#10030](https://github.com/docker/compose/pull/10030) -- Distinguished stdout and stderr in `up` logs. Fixed [compose#8098](https://github.com/docker/compose/issues/8098) -- Aligned `compose ps` output with `docker ps`. Fixed [compose#6867](https://github.com/docker/compose/issues/6867) -- Added `--include-deps` to push command. Pull Request [compose#10044](https://github.com/docker/compose/pull/10044) -- Introduced `--timestamp` option on `compose up`. Fixed [compose#5730](https://github.com/docker/compose/issues/5730) -- Compose now applies uid/gid when creating a secret from the environment. Pull Request [compose#10084](https://github.com/docker/compose/pull/10084) -- Fixed deadlock when waiting for attached-dependencies. Fixed [compose#10021](https://github.com/docker/compose/pull/10021) -- Fixed race condition when collecting pulled images IDs. Fixed [compose#9897](https://github.com/docker/compose/pull/9897) -- Compose doesn't stop the `pull` command for images that can be built. Fixed [compose#8724](https://github.com/docker/compose/pull/8724) -- Fixed corner case when there's no container to attach to. Fixed [compose#8752](https://github.com/docker/compose/pull/8752) -- Compose containers' startup must run sequentially for engine to assign distinct ports within a configured range. Fixed -[compose#8530](https://github.com/docker/compose/pull/8530) -- Fixed parsing of `repository:tag`. Fixed [compose#9208](https://github.com/docker/compose/pull/9208) -- Load project from files when explicitly set by user. Fixed [compose#9554](https://github.com/docker/compose/pull/9554) - -## 2.14.0 - -{{< release-date date="2022-12-02" >}} - -### Updates - -- Dependencies upgrade: bump compose-go to [v1.8.0](https://github.com/compose-spec/compose-go/releases/tag/v1.8.0) -- Dependencies upgrade: bump Go to 1.19.3 - -### Bug fixes and enhancements - -- Added `oom_score_adj` field to service definition. Pull Request [compose#10019](https://github.com/docker/compose/issues/10019) -- Added mode field for tmpfs mount permissions. Pull Request [compose#10031](https://github.com/docker/compose/issues/10031) -- Compose now only stops services started by `up` when interrupted. Fixed [compose#10028](https://github.com/docker/compose/issues/10028) -- Compose now loads implicit profiles for targeted services. 
Fixed [compose#10025](https://github.com/docker/compose/issues/10025) -- Compose does not require `service.build.platforms` to be set if `service.platform` is set. Fixed [compose#10017](https://github.com/docker/compose/issues/10017) -- Plain output is used during buildx image builds if `--ansi=never` is set. Fixed [compose#10020](https://github.com/docker/compose/issues/10020) -- `COMPOSE_IGNORE_ORPHANS` environment variable now behaves more consistently. Fixed [compose#10035](https://github.com/docker/compose/issues/10035) -- Compose now uses the correct image name separator in `convert`. Fixed [compose#9904](https://github.com/docker/compose/issues/9904) -- Fixed `run` for services using `network_mode: service:NAME`. Fixed [compose#10036](https://github.com/docker/compose/issues/10036) - -## 2.13.0 - -{{< release-date date="2022-11-23" >}} - -### Updates - -- Dependencies upgrade: bump containerd to 1.6.10 -- Dependencies upgrade: bump docker-credential-helpers to v0.7.0 -- Update CI dependencies. Pull Request [compose#9982](https://github.com/docker/compose/pull/9982) - -### Bug fixes and enhancements - -- Added a `no-consistency` option to `convert` command. Fixed [compose#9963](https://github.com/docker/compose/issues/9963) -- Added a `build` option to `run` command. Fixed [compose#10003](https://github.com/docker/compose/issues/10003) -- Fixed mapping `restart_policy.condition` to engine supported values. Fixed [compose#8756](https://github.com/docker/compose/issues/8756), [docs#15936](https://github.com/docker/docs/pull/15936) -- Fixed missing support of `deploy.reservation.memory`. Fixed [compose#9902](https://github.com/docker/compose/issues/9902) -- Fixed a bug to prevent usage of `COMPOSE_PROFILES` when `--profile` arg is used. Fixed [compose#9895](https://github.com/docker/compose/issues/9895) -- Fixed a bug to prevent pulling a service's image when depending on a service which will build this image. Fixed [compose#9983](https://github.com/docker/compose/issues/9983) -- Fixed parsing issue when a container number label is not found. Fixed [compose#10004](https://github.com/docker/compose/issues/10004) -- Compose now uses the platform value defined by `DOCKER_DEFAULT_PLATFORM` when no `service.platform` defined. Fixed [compose#9889](https://github.com/docker/compose/issues/9889) -- Removed usage of the deprecated dependency `gotest.tools` v2. Pull Request [compose#9935](https://github.com/docker/compose/pull/9935) -- Excluded issues labeled with `kind/feature` from stale bot process. Fixed [compose#9988](https://github.com/docker/compose/pull/9988) - -## 2.12.2 - -{{< release-date date="2022-10-21" >}} - -### Updates - -- Updated Docker Engine API to restore compatibility with Golang 1.18 needed for Linux packaging. Pull Request [compose#9940](https://github.com/docker/compose/pull/9940) - -For the full change log or additional information, check the [Compose repository 2.12.2 release page](https://github.com/docker/compose/releases/tag/v2.12.2). - -## 2.12.1 - -{{< release-date date="2022-10-21" >}} - -### Security - -- Updated Docker Engine API to apply fix of [CVE-2022-39253](https://nvd.nist.gov/vuln/detail/CVE-2022-39253). Pull Request [compose#9934](https://github.com/docker/compose/pull/9934) - -For the full change log or additional information, check the [Compose repository 2.12.1 release page](https://github.com/docker/compose/releases/tag/v2.12.1). 
-
-## 2.12.0
-
-{{< release-date date="2022-10-18" >}}
-
-### Updates
-
-- CI update to the documentation repository path
-- Upgraded to compose-go from [1.5.1 to 1.6.0](https://github.com/compose-spec/compose-go/releases/tag/v1.6.0)
-
-- Updated to go 1.19.2 to address CVE-2022-2879, CVE-2022-2880, CVE-2022-41715
-
-### Bug fixes and enhancements
-
-- Added a `quiet` option when pushing an image. Fixed [compose#9089](https://github.com/docker/compose/issues/9089)
-- Fixed a misleading error message for the `port` command. Pull Request [compose#9909](https://github.com/docker/compose/pull/9909)
-- Fixed a bug to prevent failure when Compose tries to remove a non-existing container. Fixed by [compose#9896](https://github.com/docker/compose/pull/9896/)
-- Switched to GitHub issue template forms
-
-For the full change log or additional information, check the [Compose repository 2.12.0 release page](https://github.com/docker/compose/releases/tag/v2.12.0).
-
-## 2.11.2
-
-{{< release-date date="2022-09-27" >}}
-
-> [!NOTE]
->
-> - Updates on environment file syntax & interpolation: see [compose#9879](https://github.com/docker/compose/issues/9879)
-> - Setting `DOCKER_HOST` via `.env` files is not supported in Compose v2
-
-### Updates
-
-- Upgraded to compose-go from [1.5.1 to 1.6.0](https://github.com/compose-spec/compose-go/releases/tag/v1.6.0)
-
-### Bug fixes and enhancements
-
-- Fixed a bug to prevent "invalid template" errors on valid environment variable values. Fixes [compose#9806](https://github.com/docker/compose/issues/9806), [compose#9746](https://github.com/docker/compose/issues/9746), [compose#9704](https://github.com/docker/compose/issues/9704), [compose#9294](https://github.com/docker/compose/issues/9294)
-- Fixed a bug to ensure new images from `docker compose build` are used. Fixes [compose#9856](https://github.com/docker/compose/issues/9856)
-- Fixed cross-architecture builds when `DOCKER_DEFAULT_PLATFORM` is not set. Fixes [compose#9864](https://github.com/docker/compose/pull/9864)
-- Fixed intermittent conflict errors when using `depends_on`. Fixes [compose#9014](https://github.com/docker/compose/issues/9014)
-- Cleared service `CMD` when entry point is overridden. Fixes [compose#9622](https://github.com/docker/compose/issues/9622)
-- Configured default builder export when no `build.platforms` is defined. Fixes [compose#9856](https://github.com/docker/compose/issues/9856)
-- Fixed a bug to keep the platform defined, in priority, via DOCKER_DEFAULT_PLATFORM or the `service.platform` attribute. Fixes [compose#9864](https://github.com/docker/compose/issues/9864)
-- Removed support for `DOCKER_HOST` in `.env` files. Fixes [compose#9210](https://github.com/docker/compose/issues/9210)
-- Fixed a bug to ensure a clean service command if the entry point is overridden in the run command. Fixes [compose#9622](https://github.com/docker/compose/issues/9622)
-- Deps: fixed race condition during graph traversal. Fixes [compose#9014](https://github.com/docker/compose/issues/9014)
-- CI now runs on Windows & macOS including E2E tests via Docker Desktop
-- Added more information when `service.platform` isn't part of `service.build.platforms`
-- GitHub Workflows security hardening
-
-For the full change log or additional information, check the [Compose repository 2.11.2 release page](https://github.com/docker/compose/releases/tag/v2.11.2).
-
-## 2.11.1
-
-{{< release-date date="2022-09-20" >}}
-
-### Bug fixes and enhancements
-
-- Fixed a bug to keep `depends_on` condition when service has `volumes_from`.
Fixes [compose#9843](https://github.com/docker/compose/issues/9843) -- Fixed a bug to keep the platform defined at service level during build if no build platforms. Fixes [compose#9729](https://github.com/docker/compose/pull/9729#issuecomment-1246748144) -- Fixed a bug to keep the platform defined via DOCKER_DEFAULT_PLATFORM during build if no build platforms provided. Fixes [compose#9853](https://github.com/docker/compose/issues/9853) - -For the full change log or additional information, check the [Compose repository 2.11.1 release page](https://github.com/docker/compose/releases/tag/v2.11.1). - -## 2.11.0 - -{{< release-date date="2022-09-14" >}} - -### Updates - -- Dependencies upgrade: bump Golang to 1.19.1 -- Dependencies upgrade: bump github.com/docker/go-units from 0.4.0 to 0.5.0 -- Dependencies upgrade: bump github.com/cnabio/cnab-to-oci from 0.3.6 to 0.3.7 -- Dependencies upgrade: bump go.opentelemetry.io/otel from 1.9.0 to 1.10.0 -- Dependencies upgrade: bump github.com/AlecAivazis/survey/v2 from 2.3.5 -- Dependencies upgrade: bump go.opentelemetry.io/otel from 1.4.1 to 1.9.0 -- Dependencies upgrade: bump compose-go from [1.5.0 to 1.5.1](https://github.com/compose-spec/compose-go/releases/tag/v1.5.1) - -### Bug fixes and enhancements - -- Added platforms build. Fixes [compose-spec#267](https://github.com/compose-spec/compose-spec/pull/267) -- Logs now filter to services from current Compose file. Fixes [compose#9801](https://github.com/docker/compose/issues/9801) -- Added an improved output warning when pulling images. Fixes [compose#9820](https://github.com/docker/compose/issues/9820) -- Fixed a bug to ensure correct capture of exit code when service has dependencies. Fixes [compose#9778](https://github.com/docker/compose/issues/9778) -- Fixed `down` with `--rmi`. Fixes [compose#9655](https://github.com/docker/compose/issues/9655) -- Fixed docker-compose convert that turns $ into $$ when using the --no-interpolate option. Fixes [compose#9160](https://github.com/docker/compose/issues/9160) -- Fixed `build.go` access custom labels directly cause panic. See [compose#9810](https://github.com/docker/compose/pull/9810) -- Applied newly loaded envvars to "DockerCli" and "APIClient". Fixes [compose#9210](https://github.com/docker/compose/issues/9210) -- Only attempt to start specified services on `compose start [services]`. Fixes [compose#9796](https://github.com/docker/compose/issues/9796) [compose#9807](https://github.com/docker/compose/issues/9807) -- Label built images for reliable cleanup on `down`. Fixes [compose#9655](https://github.com/docker/compose/issues/9655) - -For the full change log or additional information, check the [Compose repository 2.11.0 release page](https://github.com/docker/compose/releases/tag/v2.11.0). - -## 2.10.2 - -{{< release-date date="2022-08-26" >}} - -### Bug fixes and enhancements - -- Properly respect `DOCKER_TLS_VERIFY` and `DOCKER_CERT_PATH` environment variables. Fixes [compose#9789](https://github.com/docker/compose/issues/9789). -- Improved `Makefile` used in [docker/docker-ce-packaging#742](https://github.com/docker/docker-ce-packaging/pull/742). - -For the full change log or additional information, check the [Compose repository 2.10.2 release page](https://github.com/docker/compose/releases/tag/v2.10.2). - -## 2.10.1 - -{{< release-date date="2022-08-24" >}} - -### Updates - -- Dependencies update: Bumped github.com/moby/buildkit from [0.10.3 to 0.10.4](https://github.com/moby/buildkit/releases/tag/v0.10.4). 
-
-### Bug fixes and enhancements
-
-- Fixed image pulls being skipped when `pull_policy` was not set. Fixes [compose#9773](https://github.com/docker/compose/issues/9773).
-- Restored `.sha256` checksum files in release artifacts. Fixes [compose#9772](https://github.com/docker/compose/issues/9772).
-- Removed error message showing exit code when using `--exit-code-from`. Fixes [compose#9782](https://github.com/docker/compose/issues/9782).
-- Fixed `compose pull` to pull images even when they existed locally if `tag=latest`.
-- CI: Fixed checksums checking and brought back individual checksum files.
-
-For the full change log or additional information, check the [Compose repository 2.10.1 release page](https://github.com/docker/compose/releases/tag/v2.10.1).
-
-## 2.10.0
-
-{{< release-date date="2022-08-19" >}}
-
-### New
-
-- Applied newly loaded environment variables to `DockerCli` and `APIClient`. Fixes [compose#9210](https://github.com/docker/compose/issues/9210).
-- Added support for windows/arm64 and linux/riscv64.
-
-### Updates
-
-- Updated Dockerfile syntax to latest stable and renamed docs Dockerfile.
-- Dependencies update: Upgraded BuildKit & docker/distribution.
-- Dependencies update: Updated Docker CLI version used in CI to v20.10.17.
-- Dependencies update: Bumped github.com/containerd/containerd from [1.6.6 to 1.6.7](https://github.com/containerd/containerd/releases/tag/v1.6.7).
-- Dependencies update: Bumped github.com/containerd/containerd from [1.6.7 to 1.6.8](https://github.com/containerd/containerd/releases/tag/v1.6.8).
-- Dependencies update: Bumped to Go 1.18.5.
-- Dependencies update: Bumped github.com/cnabio/cnab-to-oci from [0.3.5 to 0.3.6](https://github.com/cnabio/cnab-to-oci/releases/tag/v0.3.6).
-
-### Bug fixes and enhancements
-
-- Reverted environment variables precedence to OS over `.env` file. Fixes [compose#9737](https://github.com/docker/compose/issues/9737).
-- Updated usage strings for consistency.
-- Resolved environment variables case-insensitively on Windows. Fixes [compose#9431](https://github.com/docker/compose/issues/9431).
-- Fixed `compose up` so dependency containers aren't stopped when a stop signal is issued, keeping parity with v1 behavior. Fixes [compose#9696](https://github.com/docker/compose/issues/9696).
-- Fixed commands that start/restart/pause/unpause so that, if run from the Compose file, the Compose model is also applied. Fixes [compose#9705](https://github.com/docker/compose/issues/9705) and [compose#9671](https://github.com/docker/compose/issues/9671).
-- Removed extra whitespaces in help text of some subcommands.
-- Fixed `compose create` to not override service pull policy when the value from the command line is configured as the default. Fixes [compose#9717](https://github.com/docker/compose/issues/9717).
-- Filtered out the "commandConn.Close" warning message. Fixes [compose#8544](https://github.com/docker/compose/issues/8544).
-- Fixed up/start/run to not wait for disabled dependencies. Fixes [compose#9591](https://github.com/docker/compose/issues/9591).
-- Applied Compose model on `compose kill`, added `--remove-orphans` option. Fixes [compose#9742](https://github.com/docker/compose/issues/9742).
-- Fixed `compose pull` to avoid pulling the same images multiple times. Fixes [compose#8768](https://github.com/docker/compose/issues/8768).
-- Pinned golangci-lint to v1.47.3 to work around an issue with v1.48.0.
-
-For the full change log, check the [Compose repository 2.10.0 release page](https://github.com/docker/compose/releases/tag/v2.10.0).
-
-## 2.9.0
-
-{{< release-date date="2022-08-07" >}}
-
-> [!IMPORTANT]
->
-> Compose v2.9.0 contains changes to the environment variable precedence that have since been reverted. We recommend using v2.10+ to avoid compatibility issues.
-
-> [!NOTE]
->
-> This release reverts the breaking changes introduced in [Compose v2.8.0](#280) by [`compose-go v1.3.0`](https://github.com/compose-spec/compose-go/releases/tag/v1.3.0).
-
-### Updates
-
-- Updated [`compose-go` to v1.4.0](https://github.com/compose-spec/compose-go/releases/tag/v1.4.0) as the previous version introduced breaking changes. Fixes [compose#9700](https://github.com/docker/compose/issues/9700).
-
-### Bug fixes and enhancements
-
-- Overrode the parent command's PreRun code for `compose version`. Fixes [compose#9698](https://github.com/docker/compose/issues/9698).
-- Fixed `LinkLocalIPs` in V2. Fixes [compose#9692](https://github.com/docker/compose/issues/9692).
-- Linked to `BUILDING.md` for testing instructions. Fixes [compose#9439](https://github.com/docker/compose/issues/9439).
-
-For the full change log or additional information, check the [Compose repository 2.9.0 release page](https://github.com/docker/compose/releases/tag/v2.9.0).
-
-## 2.8.0
-
-{{< release-date date="2022-07-29" >}}
-
-> [!IMPORTANT]
->
-> This release introduced a breaking change via `compose-go v1.3.0` and this [PR](https://github.com/compose-spec/compose-go/pull/294).
-> In this release, Docker Compose recreates new resources (networks, volumes, secrets, configs, etc.) with new names, using a `-` (dash) instead of an `_` (underscore), and tries to connect to or use these newly created resources instead of your existing ones!
->
-> Please use the Compose v2.9.0 release instead.
->
-
-### New
-
-- Introduced `--pull` flag to allow the force pull of updated service images. Fixes [compose#9451](https://github.com/docker/compose/issues/9451).
-- Increased code quality by adding `gocritic` to the linters.
-
-### Bug fixes and enhancements
-
-- Fixed interpolation error message output. Fixes [compose-spec/compose-go#292](https://github.com/compose-spec/compose-go/pull/292).
-- Defined the precedence of environment variable evaluation. Fixes [compose#9521](https://github.com/docker/compose/issues/9606),
-[compose#9638](https://github.com/docker/compose/issues/9638),
-[compose#9608](https://github.com/docker/compose/issues/9608),
-[compose#9578](https://github.com/docker/compose/issues/9578),
-[compose#9468](https://github.com/docker/compose/issues/9468), and
-[compose#9683](https://github.com/docker/compose/issues/9683).
-- Docs CI: Fixed to use push-to-fork when creating a PR.
-- Used an environment variable for the Go version and updated GitHub Actions from v2 to v3.
-- Used [google/addlicense](https://github.com/google/addlicense) instead of [kunalkushwaha/ltag](https://github.com/kunalkushwaha/ltag).
-
-For the full change log or additional information, check the [Compose repository 2.8.0 release page](https://github.com/docker/compose/releases/tag/v2.8.0).
-
-## 2.7.0
-
-{{< release-date date="2022-07-20" >}}
-
-### New
-
-- Added support for environment secrets during build step. Fixes [compose#9606](https://github.com/docker/compose/issues/9606).
-
-### Updates
-
-- Dependencies upgrade: bumped [go to 1.18.4](https://github.com/golang/go/compare/go1.18.3...go1.18.4).
-- Dependencies upgrade: bumped [compose-go to v1.2.9](https://github.com/compose-spec/compose-go/releases/tag/v1.2.9). - -### Bug fixes and enhancements - -- Networks: prevented issues due to duplicate names. Fixes [moby/moby#18864](https://github.com/moby/moby/issues/18864). -- Fixed issue with close networks name on `compose up` and `compose down` commands. Fixes [compose#9630](https://github.com/docker/compose/issues/9044). -- Used appropriate dependency condition for one-shot containers when running `compose up --wait`. Fixes [compose#9606](https://github.com/docker/compose/pull/9572). -- Fixed environment variable expansion. -- Validated depended-on services exist in consistency check. Fixes [compose#8910](https://github.com/docker/compose/issues/8910). -- Fixed hash usage in environment values. Fixes [compose#9509](https://github.com/docker/compose/issues/9509). -- Docker Build: added fix to respect dependency order for classic builder. Fixes [compose#8538](https://github.com/docker/compose/issues/8538). -- Fixed panic caused by empty string argument. Fixes [compose-switch#35](https://github.com/docker/compose-switch/issues/35). -- Fixed start/restart as to not impact one-off containers. Fixes [compose#9509](https://github.com/docker/compose/issues/9044). -- Fixed to keep the container reference when `volumes_from` targets a container and not a service. Fixes [compose#8874](https://github.com/docker/compose/issues/8874). -- build.go: added fix to initialize `CustomLabels` map if `nil`. -- Added new targets to build Compose binary before running e2e tests. -- CI: released workflow to open a PR on docs repo with latest changes. -- e2e: added test for `ps`. -- e2e: split out pause tests and add more cases. -- e2e: add more start/stop test cases. - -For the full change log or additional information, check the [Compose repository 2.7.0 release page](https://github.com/docker/compose/releases/tag/v2.7.0). - -## 2.6.1 - -{{< release-date date="2022-06-23" >}} - -### New - -- Added support for setting secrets from environment variable. Fixes [compose-spec/compose-spec#251](https://github.com/compose-spec/compose-spec/issues/251). - -### Updates - -- Upgrade: compose-go [v1.2.8](https://github.com/compose-spec/compose-go/releases/tag/v1.2.8). -- Upgrade: buildx [v0.8.2](https://github.com/docker/buildx/releases/tag/v0.8.2). -- Dependencies upgrade: bumped runc [to 1.1.2](https://github.com/opencontainers/runc/releases/tag/v1.1.2). -- Dependencies upgrade: bumped golang to [1.18.3](https://go.dev/doc/devel/release#go1.18.minor). -- Dependencies upgrade: bumped compose-go to [v1.2.8](https://github.com/compose-spec/compose-go/releases/tag/v1.2.8). -- Dependencies upgrade: bumped github.com/theupdateframework/notary from 0.6.1 to 0.7.0. -- Dependencies upgrade: bumped github.com/cnabio/cnab-to-oci from 0.3.1-beta1 to 0.3.3. -- Dependencies upgrade: bumped github.com/hashicorp/go-version from 1.3.0 to 1.5.0. -- Dependencies upgrade: bumped github.com/stretchr/testify from 1.7.0 to 1.7.2. -- Dependencies upgrade: bumped github.com/docker/buildx from 0.8.1 to 0.8.2. -- Dependencies upgrade: bumped github.com/AlecAivazis/survey/v2 from 2.3.2 to 2.3.5. -- Dependencies upgrade: bumped github.com/containerd/containerd from 1.6.2 to 1.6.6. - -### Bug fixes and enhancements - -- Added links to container create request. Fixes [#9513](https://github.com/docker/compose/issues/9513). -- Fixed `compose run` to start only direct dependencies. Fixes [#9459](https://github.com/docker/compose/issues/9459). 
-- Fixed `compose up` 'service not found' errors when using the `--no-deps` option. Fixes [#9427](https://github.com/docker/compose/issues/9427).
-- Fixed `compose down` to respect the `COMPOSE_REMOVE_ORPHANS` environment variable. Fixes [#9562](https://github.com/docker/compose/issues/9562).
-- Fixed project-level bind mount volumes. Fixes [docker/for-mac#6317](https://github.com/docker/for-mac/issues/6317).
-- Fixed parsing of properties `deploy.limits.cpus` and `deploy.limits.pids` to respect floating-point values. Fixes [#9542](https://github.com/docker/compose/issues/9542) and [#9501](https://github.com/docker/compose/issues/9501).
-- Fixed `compose ps` output to list all exposed ports. Fixes [#9527](https://github.com/docker/compose/issues/9527).
-- Fixed spelling mistakes in `compose ps` code.
-- Fixed `docker compose` to honor `--no-ansi` even when the deprecated option is requested.
-- Fixed possible ambiguity between network name and network ID.
-- e2e: added test for `ps`.
-- e2e: unmarshalled JSON into container summaries.
-- e2e: fixed subtests and block parallel unsafe tests.
-- e2e: isolated test command env from system env.
-- e2e: fixed spurious `ps` failures.
-- e2e: ensured all compose commands are standalone compatible.
-- e2e: improved test output on failures.
-
-For the full change log or additional information, check the [Compose repository 2.6.1 release page](https://github.com/docker/compose/releases/tag/v2.6.1).
-
-## 2.6.0
-
-{{< release-date date="2022-05-30" >}}
-
-### New
-
-- Added the `tags` property to the `build` section. Tags defined in this property are applied to the final image, in addition to the one defined in the `image` property.
-- Added end-to-end tests to ensure there is no regression on environment variables precedence.
-- Added ddev's end-to-end test.
-
-### Updates
-
-- Dependencies update: bumping [compose-go to 1.2.6](https://github.com/compose-spec/compose-go/releases/tag/v1.2.6).
-- Dependencies update: bumping [compose-go to 1.2.7](https://github.com/compose-spec/compose-go/releases/tag/v1.2.7).
-- Dependencies update: bumping [golang to 1.18](https://go.dev/doc/devel/release#go1.18).
-
-### Bug fixes and enhancements
-
-- Fixed `compose up` to attach only to services declared in the project with enabled profiles. Fixes [#9286](https://github.com/docker/compose/issues/9286).
-- Fixed flickering prompt when pulling the same image from multiple services. Fixes [#9469](https://github.com/docker/compose/issues/9469).
-- Fixed compose-go to import the `.env` file into the OS environment, allowing variables (such as `DOCKER_BUILDKIT`) to be set through this file. Fixes [#9345](https://github.com/docker/compose/issues/9345).
-- Fixed `TestLocalComposeUp` that failed locally.
-- Fixed local run of make `e2e-compose-standalone`.
-
-For the full change log or additional information, check the [Compose repository 2.6.0 release page](https://github.com/docker/compose/releases/tag/v2.6.0).
-
-## 2.5.1
-
-{{< release-date date="2022-05-17" >}}
-
-### Updates
-
-- Dependencies updates: bumping compose-go to 1.2.5.
-
-### Bug fixes and enhancements
-
-- Fixed resolution of the project's working directory absolute path when a relative path is declared using `--env-file`. Fixes [docker/for-mac#6229](https://github.com/docker/for-mac/issues/6229).
-- Fixed `compose down`: now rejects all arguments in order to clarify usage. Fixes [#9151](https://github.com/docker/compose/issues/9151).
-- Fixed `compose down`: now exits with status=0 if there is nothing to remove.
Fixes [#9426](https://github.com/docker/compose/issues/9426).
-- Fixed extra space printed in logs output lines with the `--no-log-prefix` option. Fixes [#9464](https://github.com/docker/compose/issues/9464).
-- Clarified what the default work dir is when multiple compose files are passed.
-- cp command: copy to all containers of a service as the default behavior.
-
-For the full change log or additional information, check the [Compose repository 2.5.1 release page](https://github.com/docker/compose/releases/tag/v2.5.1).
-
-## 2.5.0
-
-{{< release-date date="2022-04-29" >}}
-
-### Bug fixes and enhancements
-
-- Fixed panic with the `compose down` command when the `-p` flag is specified. Fixes [#9353](https://github.com/docker/compose/issues/9353).
-- Passed newly created project as input to start services (`docker compose up`). Fixes [#9356](https://github.com/docker/compose/issues/9356).
-- Included services declared under links in the docker-compose file as implicit dependencies. Fixes [#9301](https://github.com/docker/compose/issues/9301).
-- Changed the `docker compose pull` command to respect the defined policy: 1) skip services configured as `pull_policy: never` and 2) ignore those with an existing image and `pull_policy: missing`. Fixes [#3660](https://github.com/docker/compose/issues/3660).
-- Errors building a project from resources are no longer ignored, in order to prevent a `down` panic. Fixes [#9383](https://github.com/docker/compose/issues/9383).
-- Enforced project name to be lowercase. Fixes [#9378](https://github.com/docker/compose/issues/9378).
-- Added support for build-time secrets. Fixes [#6358](https://github.com/docker/compose/issues/6358).
-- Changed `compose-go` to allow (re)building the volume string to be used by the engine `bind` API when mount can't be used. Fixes [#9380](https://github.com/docker/compose/issues/9380).
-- Provided a checksums.txt file and added `--binary` to allow verification on different OSes. Fixes [#9388](https://github.com/docker/compose/issues/9388).
-- Added changes so the locally pulled image's ID is inspected and persisted to `com.docker.compose.image`. Fixes [#9357](https://github.com/docker/compose/issues/9357).
-- Fixed issue regarding IPAM gateway setup. Fixes [#9330](https://github.com/docker/compose/issues/9330).
-- Added support for the ppc64le architecture for the docker compose binary.
-- Fixed search/replace typo in `--no-TTY` documentation.
-
-For the full change log or additional information, check the [Compose repository 2.5.0 release page](https://github.com/docker/compose/releases/tag/v2.5.0).
-
-## 2.4.1
-
-{{< release-date date="2022-04-04" >}}
-
-### Bug fixes and enhancements
-
-- Passed the `--rm` flag value as is to the Docker CLI when running a container with this flag. Fixes [#9314](https://github.com/docker/compose/issues/9314).
-- Added ssh config to the build options when building an image from a `docker compose up` command. Fixes [#9338](https://github.com/docker/compose/issues/9338).
-- Added inspection of the container to check if a TTY is required. Running services with `tty:true` specified now show console output. Fixes [#9288](https://github.com/docker/compose/issues/9288).
-
-For the full change log or additional information, check the [Compose repository 2.4.1 release page](https://github.com/docker/compose/releases/tag/v2.4.1).
-
-
-## 2.4.0
-
-{{< release-date date="2022-04-01" >}}
-
-### Updates
-
-- Dependencies update: Bumped buildx to v0.8.1 to fix possible panic on handling build context scanning errors.
-
-### Bug fixes and enhancements
-
-- Passed the interactive flag `-i` from the Compose CLI to the Docker one to run the exec command. Fixes [#9315](https://github.com/docker/compose/issues/9315).
-- Compose commands now take the value of the `COMPOSE_PROJECT_NAME` environment variable into consideration. Fixes [#9316](https://github.com/docker/compose/issues/9316).
-- Fixed an issue where the `compose down` command, when executed in contexts without any started services or resources to delete, returned an error. The error was caused by the command trying to delete a nonexistent default network. Fixes [#9333](https://github.com/docker/compose/issues/9333).
-- Introduced support for `cache_from`, `cache_to`, `no_cache` and `pull` attributes in the build section. These attributes allow forcing a complete rebuild from sources and checking with the registry for the images used. These changes provide the basis for offering `--no-cache` and `--pull` options for the compose build (or equivalent) command down the line.
-- Introduced support for an `--ssh` flag for the `build` command from the CLI and Compose file. Fixes [#7025](https://github.com/docker/compose/issues/7025).
-- Fixed a typo in the `--ssh` flag description. Related to [#7025](https://github.com/docker/compose/issues/7025).
-- Pinned Kubernetes dependencies to the same version as in buildx.
-- Passed the interactive flag from the Compose CLI to the Docker one to run the exec command.
-- Fixed race condition on start-stop end-to-end tests running in parallel.
-- Removed code regarding an obsolete warning.
-- Vendor: github.com/containerd/containerd v1.6.2. Includes a fix for CVE-2022-24769 (doesn't affect our codebase).
-
-For the full change log or additional information, check the [Compose repository 2.4.0 release page](https://github.com/docker/compose/releases/tag/v2.4.0).
-
-
-## 2.3.4
-
-{{< release-date date="2022-03-25" >}}
-
-### New
-
-- Introduced changes to use RunExec and RunStart from docker/cli to handle all the interactive/tty/* terminal logic.
-
-### Removed
-
-- Removing a container with no candidate now produces a warning instead of an error. Fixes [#9255](https://github.com/docker/compose/issues/9255).
-- Removed the "Deprecated" mentions from the `-i` and `-t` options to the run and exec commands. These options are on by default and in use. Fixes [#9229](https://github.com/docker/compose/pull/9229#discussion_r819730788).
-- Removed the "Deprecated" mention from the `--filter` flag, to keep consistency with other commands.
-- Removed the need to get the original compose.yaml file to run 'docker compose kill'.
-
-### Updates
-
-- Dependencies update: Bumped github.com/spf13/cobra from 1.3.0 to 1.4.0. The Cobra library no longer requires Viper and all of its indirect dependencies. [See Cobra's release page](https://github.com/spf13/cobra/releases).
-- Dependencies update: Bumped buildx from v0.7.1 to v0.8.0.
-
-### Bug fixes and enhancements
-
-- Restored the 'compose up -d' behavior of recreating containers for Compose file images with refreshed content. Fixes [#9259](https://github.com/docker/compose/issues/9259).
-- Updated the documentation for the Docker Compose `--status`, `--filter` and `--format` flags.
-- `docker compose down -v` now does not remove external volumes and networks, as per the option's expected and documented behavior. Whenever a project is specified, it is now also used to restrict `down` to removing only the resources listed in the compose.yaml file.
-Fixes [#9172](https://github.com/docker/compose/issues/9172), [#9145](https://github.com/docker/compose/issues/9145).
-- Changed Compose API reference docs automation to pick up diffs code vs. docs. - -For the full change log or additional information, check the [Compose repository 2.3.4 release page](https://github.com/docker/compose/releases/tag/v2.3.4). - -## Other Releases - -(2022-03-8 to 2022-04-14) - -For the releases later than 1.29.2 and earlier than 2.3.4, please check the [Compose repository release pages](https://github.com/docker/compose/releases). - -## 1.29.2 - -(2021-05-10) - -### Miscellaneous - -- Removed the prompt to use `docker-compose` in the `up` command. - -- Bumped `py` to `1.10.0` in `requirements-indirect.txt`. - -## 1.29.1 - -(2021-04-13) - -### Bugs - -- Fixed invalid handler warning on Windows builds. - -- Fixed config hash to trigger container re-creation on IPC mode updates. - -- Fixed conversion map for `placement.max_replicas_per_node`. - -- Removed extra scan suggestion on build. - -## 1.29.0 - -(2021-04-06) - -### Features - -- Added profile filter to `docker-compose config`. - -- Added a `depends_on` condition to wait for successful service completion. - -### Miscellaneous - -- Added an image scan message on build. - -- Updated warning message for `--no-ansi` to mention `--ansi never` as alternative. - -- Bumped docker-py to 5.0.0. - -- Bumped PyYAML to 5.4.1. - -- Bumped python-dotenv to 0.17.0. - -## 1.28.6 - -(2021-03-23) - -### Bug fixes - -- Made `--env-file` relative to the current working directory. Environment file paths set with `--env-file` are now relative to the current working directory and override the default `.env` file located in the project directory. - -- Fixed missing service property `storage_opt` by updating the Compose schema. - -- Fixed build `extra_hosts` list format. - -- Removed additional error message on `exec`. - -### Miscellaneous - -- Added `compose.yml` and `compose.yaml` to the default filename list. - -## 1.28.5 - -(2021-02-26) - -### Bugs - -- Fixed the OpenSSL version mismatch error when shelling out to the SSH client (via bump to docker-py 4.4.4 which contains the fix). - -- Added missing build flags to the native builder: `platform`, `isolation` and `extra_hosts`. - -- Removed info message on native build. - -- Fixed the log fetching bug when service logging driver is set to 'none'. - -## 1.28.4 - -(2021-02-18) - -### Bug fixes - -- Fixed SSH port parsing by bumping docker-py to 4.4.3. - -### Miscellaneous - -- Bumped Python to 3.7.10. - -## 1.28.3 - -(2021-02-17) - -### Bug fixes - -- Fixed SSH hostname parsing when it contains a leading 's'/'h', and removed the quiet option that was hiding the error (via docker-py bump to 4.4.2). - -- Fixed key error for `--no-log-prefix` option. - -- Fixed incorrect CLI environment variable name for service profiles: `COMPOSE_PROFILES` instead of `COMPOSE_PROFILE`. - -- Fixed the fish completion. - -### Miscellaneous - -- Bumped cryptography to 3.3.2. - -- Removed the log driver filter. - -For a list of PRs and issues fixed in this release, see [Compose 1.28.3](https://github.com/docker/compose/milestone/53?closed=1). - -## 1.28.2 - -(2021-01-26) - -### Bug fixes - -- Revert to Python 3.7 bump for Linux static builds - -- Add bash completion for `docker-compose logs|up --no-log-prefix` - -### Miscellaneous - -- CI setup update - -## 1.28.0 - -(2021-01-20) - -### Features - -- Added support for NVIDIA GPUs through device requests. - -- Added support for service profiles. - -- Changed the SSH connection approach to the Docker CLI by shelling out to the local SSH client. 
Set the `COMPOSE_PARAMIKO_SSH=1` environment variable to enable the old behavior. - -- Added a flag to disable log prefix. - -- Added a flag for ANSI output control. - -- Docker Compose now uses the native Docker CLI's `build` command when building images. Set the `COMPOSE_DOCKER_CLI_BUILD=0` environment variable to disable this feature. - -### Bug fixes - -- Made `parallel_pull=True` by default. - -- Restored the warning for configs in non-swarm mode. - -- Took `--file` into account when defining `project_dir`. - -- Fixed a service attach bug on `compose up`. - -### Miscellaneous - -- Added usage metrics. - -- Synced schema with COMPOSE specification. - -- Improved failure report for missing mandatory environment variables. - -- Bumped `attrs` to 20.3.0. - -- Bumped `more_itertools` to 8.6.0. - -- Bumped `cryptograhy` to 3.2.1. - -- Bumped `cffi` to 1.14.4. - -- Bumped `virtualenv` to 20.2.2. - -- Bumped `bcrypt` to 3.2.0. - -- Bumped GitPython to 3.1.11. - -- Bumped `docker-py` to 4.4.1. - -- Bumped Python to 3.9. - -- Linux: bumped Debian base image from stretch to buster (required for Python 3.9). - -- macOS: Bumped OpenSSL 1.1.1g to 1.1.1h, and Python 3.7.7 to 3.9.0. - -- Bumped PyInstaller to 4.1. - -- Relaxed the restriction on base images to latest minor. - -- Updated READMEs. - -## 1.27.4 - -(2020-09-24) - -### Bug fixes - -- Removed path checks for bind mounts. - -- Fixed port rendering to output long form syntax for non-v1. - -- Added protocol to the Docker socket address. - -## 1.27.3 - -(2020-09-16) - -### Bug fixes - -- Merged `max_replicas_per_node` on `docker-compose config`. - -- Fixed `depends_on` serialization on `docker-compose config`. - -- Fixed scaling when some containers are not running on `docker-compose up`. - -- Enabled relative paths for `driver_opts.device` for `local` driver. - -- Allowed strings for `cpus` fields. - -## 1.27.2 - -(2020-09-10) - -### Bug fixes - -- Fixed bug on `docker-compose run` container attach. - -## 1.27.1 - -(2020-09-10) - -### Bug fixes - -- Fixed `docker-compose run` when `service.scale` is specified. - -- Allowed the `driver` property for external networks as a temporary workaround for the Swarm network propagation issue. - -- Pinned the new internal schema version to `3.9` as the default. - -- Preserved the version number configured in the Compose file. - -## 1.27.0 - -(2020-09-07) - -### Features - -- Merged 2.x and 3.x Compose formats and aligned with `COMPOSE_SPEC` schema. - -- Implemented service mode for `ipc`. - -- Passed `COMPOSE_PROJECT_NAME` environment variable in container mode. - -- Made `run` behave in the same way as `up`. - -- Used `docker build` on `docker-compose run` when `COMPOSE_DOCKER_CLI_BUILD` environment variable is set. - -- Used the docker-py default API version for engine queries (`auto`). - -- Parsed `network_mode` on build. - -### Bug fixes - -- Ignored build context path validation when building is not required. - -- Fixed float to bytes conversion via docker-py bump to 4.3.1. - -- Fixed the scale bug when the deploy section is set. - -- Fixed `docker-py` bump in `setup.py`. - -- Fixed experimental build failure detection. - -- Fixed context propagation to the Docker CLI. - -### Miscellaneous - -- Bumped `docker-py` to 4.3.1. - -- Bumped `tox` to 3.19.0. - -- Bumped `virtualenv` to 20.0.30. - -- Added script for Docs synchronization. - -## 1.26.2 - -(2020-07-02) - -### Bug fixes - -- Enforced `docker-py` 4.2.2 as minimum version when installing with pip. 
- -## 1.26.1 - -(2020-06-30) - -### Features - -- Bumped `docker-py` from 4.2.1 to 4.2.2. - -### Bug fixes - -- Enforced `docker-py` 4.2.1 as minimum version when installing with pip. - -- Fixed context load for non-docker endpoints. - -## 1.26.0 - -(2020-06-03) - -### Features - -- Added `docker context` support. - -- Added missing test dependency `ddt` to `setup.py`. - -- Added `--attach-dependencies` to command `up` for attaching to dependencies. - -- Allowed compatibility option with `COMPOSE_COMPATIBILITY` environment variable. - -- Bumped `Pytest` to 5.3.4 and add refactor compatibility with the new version. - -- Bumped `OpenSSL` from 1.1.1f to 1.1.1g. - -- Bumped `certifi` from 2019.11.28 to 2020.4.5.1. - -- Bumped `docker-py` from 4.2.0 to 4.2.1. - -### Bug fixes - -- Properly escaped values coming from `env_files`. - -- Synchronized compose-schemas with upstream (docker/cli). - -- Removed `None` entries on exec command. - -- Added `distribution` package to get distribution information. - -- Added `python-dotenv` to delegate `.env` file processing. - -- Stopped adjusting output on terminal width when piped into another command. - -- Showed an error message when `version` attribute is malformed. - -- Fixed HTTPS connection when `DOCKER_HOST` is remote. - -## 1.25.5 - -(2020-04-10) - -### Features - -- Bumped OpenSSL from 1.1.1d to 1.1.1f. - -- Added Compose version 3.8. - - - Limited service scale to the size specified by the field `deploy.placement.max_replicas_per_node`. - -## 1.25.4 - -(2020-02-03) - -### Bug fixes - -- Fixed the CI script to enforce the minimal MacOS version to 10.11. - -- Fixed docker-compose exec for keys with no value on environment files. - -## 1.25.3 - -(2020-01-23) - -### Bug fixes - -- Fixed the CI script to enforce the compilation with Python3. - -- Updated the binary's sha256 on the release page. - -## 1.25.2 - -(2020-01-20) - -### New features - -- Docker Compose now allows the compatibility option with `COMPOSE_COMPATIBILITY` environment variable. - -### Bug fixes - -- Fixed an issue that caused Docker Compose to crash when the `version` field was set to an invalid value. Docker Compose now displays an error message when invalid values are used in the version field. - -- Fixed an issue that caused Docker Compose to render messages incorrectly when running commands outside a terminal. - -## 1.25.1 - -(2020-01-06) - -### Bugfixes - -- Decoded the `APIError` explanation to Unicode before using it to create and start a container. - -- Docker Compose discards `com.docker.compose.filepaths` labels that have `None` as value. This usually occurs when labels originate from stdin. - -- Added OS X binary as a directory to solve slow start up time issues caused by macOS Catalina binary scan. - -- Passed the `HOME` environment variable in container mode when running with `script/run/run.sh`. - -- Docker Compose now reports images that cannot be pulled, however, are required to be built. - -## 1.25.0 - -(2019-11-18) - -### New features - -- Set no-colors to true by changing `CLICOLOR` env variable to `0`. - -- Added working directory, config files, and env file to service labels. - -- Added ARM build dependencies. - -- Added BuildKit support (use `DOCKER_BUILDKIT=1` and `COMPOSE_DOCKER_CLI_BUILD=1`). - -- Raised Paramiko to version 2.6.0. - -- Added the following tags: `docker-compose:latest`, `docker-compose:-alpine`, and `docker-compose:-debian`. - -- Raised `docker-py` to version 4.1.0. - -- Enhanced support for `requests`, up to version 2.22.0. 
- -- Removed empty tag on `build:cache_from`. - -- `Dockerfile` enhancement that provides for the generation of `libmusl` binaries for Alpine Linux. - -- Pulling only of images that cannot be built. - -- The `scale` attribute now accepts `0` as a value. - -- Added a `--quiet` option and a `--no-rm` option to the `docker-compose build` command. - -- Added a `--no-interpolate` option to the `docker-compose config` command. - -- Raised OpenSSL for MacOS build from `1.1.0` to `1.1.1c`. - -- Added support for the `docker-compose.yml` file's `credential_spec` configuration option. - -- Resolution of digests without having to pull the image. - -- Upgraded `pyyaml` to version `4.2b1`. - -- Lowered the severity to `warning` for instances in which `down` attempts to remove a non-existent image. - -- Mandated the use of improved API fields for project events, when possible. - -- Updated `setup.py` for modern `pypi/setuptools`, and removed `pandoc` dependencies. - -- Removed `Dockerfile.armhf`, which is no longer required. - -### Bug fixes - -- Made container service color deterministic, including the removal of the color red. - -- Fixed non-ASCII character errors (Python 2 only). - -- Changed image sizing to decimal format, to align with Docker CLI. - -- `tty` size acquired through Python POSIX support. - -- Fixed same file `extends` optimization. - -- Fixed `stdin_open`. - -- Fixed the issue of `--remove-orphans` being ignored encountered during use with `up --no-start` option. - -- Fixed `docker-compose ps --all` command. - -- Fixed the `depends_on` dependency recreation behavior. - -- Fixed bash completion for the `docker-compose build --memory` command. - -- Fixed the misleading environmental variables warning that occurs when the `docker-compose exec` command is performed. - -- Fixed the failure check in the `parallel_execute_watch function`. - -- Fixed the race condition that occurs following the pulling of an image. - -- Fixed error on duplicate mount points (a configuration error message now displays). - -- Fixed the merge on `networks` section. - -- Compose container is always connected to `stdin` by default. - -- Fixed the presentation of failed services on the `docker-compose start` command when containers are not available. - -## 1.24.1 - -(2019-06-24) - -This release contains minor improvements and bug fixes. - -## 1.24.0 - -(2019-03-28) - -### Features - -- Added support for connecting to the Docker Engine using the `ssh` protocol. - -- Added an `--all` flag to `docker-compose ps` to include stopped one-off containers - in the command's output. - -- Added bash completion for `ps --all|-a`. - -- Added support for credential_spec. - -- Added `--parallel` to `docker build`'s options in `bash` and `zsh` completion. - -### Bug fixes - -- Fixed a bug where some valid credential helpers weren't properly handled by Compose - when attempting to pull images from private registries. - -- Fixed an issue where the output of `docker-compose start` before containers were created - was misleading. - -- Compose will no longer accept whitespace in variable names sourced from environment files. - This matches the Docker CLI behavior. - -- Compose will now report a configuration error if a service attempts to declare - duplicate mount points in the volumes section. - -- Fixed an issue with the containerized version of Compose that prevented users from - writing to stdin during interactive sessions started by `run` or `exec`. 
- -- One-off containers started by `run` no longer adopt the restart policy of the service, - and are instead set to never restart. - -- Fixed an issue that caused some container events to not appear in the output of - the `docker-compose events` command. - -- Missing images will no longer stop the execution of `docker-compose down` commands. A warning is - now displayed instead. - -- Force `virtualenv` version for macOS CI. - -- Fixed merging of Compose files when network has `None` config. - -- Fixed `CTRL+C` issues by enabling `bootloader_ignore_signals` in `pyinstaller`. - -- Bumped `docker-py` version to `3.7.2` to fix SSH and proxy configuration issues. - -- Fixed release script and some typos on release documentation. - -## 1.23.2 - -(2018-11-28) - -### Bug fixes - -- Reverted a 1.23.0 change that appended random strings to container names - created by `docker-compose up`, causing addressability issues. - > [!NOTE] - > - > Containers created by `docker-compose run` will continue to use randomly generated names to avoid collisions during parallel runs. - -- Fixed an issue where some `dockerfile` paths would fail unexpectedly when - attempting to build on Windows. - -- Fixed a bug where build context URLs would fail to build on Windows. - -- Fixed a bug that caused `run` and `exec` commands to fail for some otherwise - accepted values of the `--host` parameter. - -- Fixed an issue where overrides for the `storage_opt` and `isolation` keys in - service definitions weren't properly applied. - -- Fixed a bug where some invalid Compose files would raise an uncaught - exception during validation. - -## 1.23.1 - -(2018-11-01) - -### Bug fixes - -- Fixed a bug where working with containers created with a version of Compose earlier than `1.23.0` - would cause unexpected crashes. - -- Fixed an issue where the behavior of the `--project-directory` flag would - vary depending on which subcommand was used. - -## 1.23.0 - -(2018-10-30) - -### Important note - -The default naming scheme for containers created by Compose in this version -has changed from `<project>_<service>_<index>` to -`<project>_<service>_<index>_<slug>`, where `<slug>` is a randomly-generated -hexadecimal string. Please make sure to update scripts relying on the old -naming scheme accordingly before upgrading. - -### Features - -- Logs for containers restarting after a crash will now appear in the output - of the `up` and `logs` commands. - -- Added `--hash` option to the `docker-compose config` command, allowing users - to print a hash string for each service's configuration to facilitate rolling - updates. - -- Added `--parallel` flag to the `docker-compose build` command, allowing - Compose to build up to 5 images simultaneously. - -- Output for the `pull` command now reports status / progress even when pulling - multiple images in parallel. - -- For images with multiple names, Compose will now attempt to match the one - present in the service configuration in the output of the `images` command. - -### Bug fixes - -- Fixed an issue where parallel `run` commands for the same service would fail due to name - collisions. - -- Fixed an issue where paths longer than 260 characters on Windows clients would - cause `docker-compose build` to fail. - -- Fixed a bug where attempting to mount `/var/run/docker.sock` with - Docker Desktop for Windows would result in failure. - -- The `--project-directory` option is now used by Compose to determine where to - look for the `.env` file.
- -- `docker-compose build` no longer fails when attempting to pull an image with - credentials provided by the ***gcloud credential helper***. - -- Fixed the `--exit-code-from` option in `docker-compose up` to always report - the actual exit code even when the watched container is not the cause of the - exit. - -- Fixed an issue that would prevent recreating a service in some cases where - a volume would be mapped to the same mountpoint as a volume declared within the Dockerfile for that image. - -- Fixed a bug that caused hash configuration with multiple networks to be - inconsistent, causing some services to be unnecessarily restarted. - -- Fixed a bug that would cause failures with variable substitution for services - with a name containing one or more dot characters. - -- Fixed a pipe handling issue when using the containerized version of Compose. - -- Fixed a bug causing `external: false` entries in the Compose file to be - printed as `external: true` in the output of `docker-compose config`. - -- Fixed a bug where issuing a `docker-compose pull` command on services - without a defined image key would cause Compose to crash. - -- Volumes and binds are now mounted in the order they are declared in the - service definition. - -### Miscellaneous - -- The `zsh` completion script has been updated with new options, and no - longer suggests container names where service names are expected. - -## 1.22.0 - -(2018-07-17) - -### New features - -#### Compose format version 3.7 - -- Introduced version 3.7 of the `docker-compose.yml` specification. - This version requires Docker Engine 18.06.0 or above. - -- Added support for `rollback_config` in the deploy configuration - -- Added support for the `init` parameter in service configurations - -- Added support for extension fields in service, network, volume, secret, - and config configurations - -#### Compose format version 2.4 - -- Added support for extension fields in service, network, - and volume configurations - -### Bug fixes - -- Fixed a bug that prevented deployment with some Compose files when - `DOCKER_DEFAULT_PLATFORM` was set - -- Compose will no longer try to create containers or volumes with - invalid starting characters - -- Fixed several bugs that prevented Compose commands from working properly - with containers created with an older version of Compose - -- Fixed an issue with the output of `docker-compose config` with the - `--compatibility-mode` flag enabled when the source file contains - attachable networks - -- Fixed a bug that prevented the `gcloud` credential store from working - properly when used with the Compose binary on UNIX - -- Fixed a bug that caused connection errors when trying to operate - over a non-HTTPS TCP connection on Windows - -- Fixed a bug that caused builds to fail on Windows if the Dockerfile - was located in a subdirectory of the build context - -- Fixed an issue that prevented proper parsing of UTF-8 BOM encoded - Compose files on Windows - -- Fixed an issue with handling of the double-wildcard (`**`) pattern in `.dockerignore` files when using `docker-compose build` - -- Fixed a bug that caused auth values in legacy `.dockercfg` files to be ignored -- `docker-compose build` will no longer attempt to create image names starting with an invalid character - -## 1.21.2 - -(2018-05-03) - -### Bug fixes - -- Fixed a bug where the ip_range attribute in IPAM configs was prevented - from passing validation - -## 1.21.1 - -(2018-04-27) - -### Bug fixes - -- In 1.21.0, we introduced a change to how project 
names are sanitized for - internal use in resource names. This caused issues when manipulating an - existing, deployed application whose name had changed as a result. - This release properly detects resources using "legacy" naming conventions. - -- Fixed an issue where specifying an in-context Dockerfile using an absolute - path would fail despite being valid. - -- Fixed a bug where IPAM option changes were incorrectly detected, preventing - redeployments. - -- Validation of v2 files now properly checks the structure of IPAM configs. - -- Improved support for credentials stores on Windows to include binaries using - extensions other than `.exe`. The list of valid extensions is determined by - the contents of the `PATHEXT` environment variable. - -- Fixed a bug where Compose would generate invalid binds containing duplicate - elements with some v3.2 files, triggering errors at the Engine level during - deployment. - -## 1.21.0 - -(2018-04-11) - -### New features - -#### Compose file version 2.4 - -- Introduced version 2.4 of the `docker-compose.yml` specification. - This version requires Docker Engine 17.12.0 or above. - -- Added support for the `platform` parameter in service definitions. - If supplied, the parameter is also used when performing build for the - service. - -#### Compose file version 2.2 and up - -- Added support for the `cpu_rt_period` and `cpu_rt_runtime` parameters - in service definitions (2.x only). - -#### Compose file version 2.1 and up - -- Added support for the `cpu_period` parameter in service definitions - (2.x only). - -- Added support for the `isolation` parameter in service build configurations. - Additionally, the `isolation` parameter in service definitions is used for - builds as well if no `build.isolation` parameter is defined. (2.x only) - -#### All formats - -- Added support for the `--workdir` flag in `docker-compose exec`. - -- Added support for the `--compress` flag in `docker-compose build`. - -- `docker-compose pull` is now performed in parallel by default. You can - opt out using the `--no-parallel` flag. The `--parallel` flag is now - deprecated and will be removed in a future version. - -- Dashes and underscores in project names are no longer stripped out. - -- `docker-compose build` now supports the use of Dockerfile from outside - the build context. - -### Bug fixes - -- Compose now checks that the volume's configuration matches the remote - volume, and errors out if a mismatch is detected. - -- Fixed a bug that caused Compose to raise unexpected errors when attempting - to create several one-off containers in parallel. - -- Fixed a bug with argument parsing when using `docker-machine config` to - generate TLS flags for `exec` and `run` commands. - -- Fixed a bug where variable substitution with an empty default value - (e.g. `${VAR:-}`) would print an incorrect warning. - -- Improved resilience when encoding of the Compose file doesn't match the - system's. Users are encouraged to use UTF-8 when possible. - -- Fixed a bug where external overlay networks in Swarm would be incorrectly - recognized as inexistent by Compose, interrupting otherwise valid - operations. - -## 1.20.0 - -(2018-03-20) - -### New features - -#### Compose file version 3.6 - -- Introduced version 3.6 of the `docker-compose.yml` specification. - This version must be used with Docker Engine 18.02.0 or above. 
- -- Added support for the `tmpfs.size` property in volume mappings - -#### Compose file version 3.2 and up - -- The `--build-arg` option can now be used without specifying a service - in `docker-compose build` - -#### Compose file version 2.3 - -- Added support for `device_cgroup_rules` in service definitions - -- Added support for the `tmpfs.size` property in long-form volume mappings - -- The `--build-arg` option can now be used without specifying a service - in `docker-compose build` - -#### All formats - -- Added a `--log-level` option to the top-level `docker-compose` command. - Accepted values are `debug`, `info`, `warning`, `error`, `critical`. - Default log level is `info` - -- `docker-compose run` now allows users to unset the container's entrypoint - -- Proxy configuration found in the `~/.docker/config.json` file now populates - environment and build args for containers created by Compose - -- Added the `--use-aliases` flag to `docker-compose run`, indicating that - network aliases declared in the service's config should be used for the - running container - -- Added the `--include-deps` flag to `docker-compose pull` - -- `docker-compose run` now kills and removes the running container upon - receiving `SIGHUP` - -- `docker-compose ps` now shows the containers' health status if available - -- Added the long-form `--detach` option to the `exec`, `run` and `up` - commands - -### Bug fixes - -- Fixed `.dockerignore` handling, notably with regard to absolute paths - and last-line precedence rules - -- Fixed an issue where Compose would make costly DNS lookups when connecting - to the Engine when using Docker For Mac - -- Fixed a bug introduced in 1.19.0 which caused the default certificate path - to not be honored by Compose - -- Fixed a bug where Compose would incorrectly check whether a symlink's - destination was accessible when part of a build context - -- Fixed a bug where `.dockerignore` files containing lines of whitespace - caused Compose to error out on Windows - -- Fixed a bug where `--tls*` and `--host` options wouldn't be properly honored - for interactive `run` and `exec` commands - -- A `seccomp:` entry in the `security_opt` config now correctly - sends the contents of the file to the engine - -- ANSI output for `up` and `down` operations should no longer affect the wrong - lines - -- Improved support for non-unicode locales - -- Fixed a crash occurring on Windows when the user's home directory name - contained non-ASCII characters - -- Fixed a bug occurring during builds caused by files with a negative `mtime` - values in the build context - -- Fixed an encoding bug when streaming build progress - -## 1.19.0 - -(2018-02-07) - -### Breaking changes - -- On UNIX platforms, interactive `run` and `exec` commands now require - the `docker` CLI to be installed on the client by default. To revert - to the previous behavior, users may set the `COMPOSE_INTERACTIVE_NO_CLI` - environment variable. 
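As a sketch of the `tmpfs.size` support added in 1.20.0 above (the service name, image, and size are placeholders chosen for illustration):

```yaml
version: "3.6"
services:
  cache:
    image: redis:alpine
    volumes:
      - type: tmpfs
        target: /data
        tmpfs:
          size: 104857600   # upper bound for the tmpfs mount, in bytes (~100 MB)
```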
- -### New features - -#### Compose file version 3.x - -- The output of the `config` command should now merge `deploy` options from - several Compose files in a more accurate manner - -#### Compose file version 2.3 - -- Added support for the `runtime` option in service definitions - -#### Compose file version 2.1 and up - -- Added support for the `${VAR:?err}` and `${VAR?err}` variable interpolation - syntax to indicate mandatory variables - -#### Compose file version 2.x - -- Added `priority` key to service network mappings, allowing the user to - define in which order the specified service will connect to each network - -#### All formats - -- Added `--renew-anon-volumes` (shorthand `-V`) to the `up` command, - preventing Compose from recovering volume data from previous containers for - anonymous volumes - -- Added limit for number of simultaneous parallel operations, which should - prevent accidental resource exhaustion of the server. Default is 64 and - can be configured using the `COMPOSE_PARALLEL_LIMIT` environment variable - -- Added `--always-recreate-deps` flag to the `up` command to force recreating - dependent services along with the dependency owner - -- Added `COMPOSE_IGNORE_ORPHANS` environment variable to forgo orphan - container detection and suppress warnings - -- Added `COMPOSE_FORCE_WINDOWS_HOST` environment variable to force Compose - to parse volume definitions as if the Docker host was a Windows system, - even if Compose itself is currently running on UNIX - -- Bash completion should now be able to better differentiate between running, - stopped and paused services - -### Bug fixes - -- Fixed a bug that would cause the `build` command to report a connection - error when the build context contained unreadable files or FIFO objects. - These file types will now be handled appropriately - -- Fixed various issues around interactive `run`/`exec` sessions. - -- Fixed a bug where setting TLS options with environment and CLI flags - simultaneously would result in part of the configuration being ignored - -- Fixed a bug where the DOCKER_TLS_VERIFY environment variable was being - ignored by Compose - -- Fixed a bug where the `-d` and `--timeout` flags in `up` were erroneously - marked as incompatible - -- Fixed a bug where the recreation of a service would break if the image - associated with the previous container had been removed - -- Fixed a bug where updating a mount's target would break Compose when - trying to recreate the associated service - -- Fixed a bug where `tmpfs` volumes declared using the extended syntax in - Compose files using version 3.2 would be erroneously created as anonymous - volumes instead - -- Fixed a bug where type conversion errors would print a stacktrace instead - of exiting gracefully - -- Fixed some errors related to unicode handling - -- Dependent services no longer get recreated along with the dependency owner - if their configuration hasn't changed - -- Added better validation of `labels` fields in Compose files. Label values - containing scalar types (number, boolean) now get automatically converted - to strings - -## 1.18.0 - -(2017-12-18) - -### New features - -#### Compose file version 3.5 - -- Introduced version 3.5 of the `docker-compose.yml` specification. 
- This version requires Docker Engine 17.06.0 or above - -- Added support for the `shm_size` parameter in build configurations - -- Added support for the `isolation` parameter in service definitions - -- Added support for custom names for network, secret and config definitions - -#### Compose file version 2.3 - -- Added support for `extra_hosts` in build configuration - -- Added support for the [long syntax](/reference/compose-file/legacy-versions.md) for volume entries, as previously introduced in the 3.2 format. - Using this syntax will create [mounts](/manuals/engine/storage/bind-mounts.md) instead of volumes. - -#### Compose file version 2.1 and up - -- Added support for the `oom_kill_disable` parameter in service definitions - (2.x only) - -- Added support for custom names for network definitions (2.x only) - - -#### All formats - -- Values interpolated from the environment will now be converted to the - proper type when used in non-string fields. - -- Added support for `--label` in `docker-compose run` - -- Added support for `--timeout` in `docker-compose down` - -- Added support for `--memory` in `docker-compose build` - -- Setting `stop_grace_period` in service definitions now also sets the - container's `stop_timeout` - -### Bug fixes - -- Fixed an issue where Compose was still handling service hostname according - to legacy engine behavior, causing hostnames containing dots to be cut up - -- Fixed a bug where the `X-Y:Z` syntax for ports was considered invalid - by Compose - -- Fixed an issue with CLI logging causing duplicate messages and inelegant - output to occur - -- Fixed an issue that caused `stop_grace_period` to be ignored when using - multiple Compose files - -- Fixed a bug that caused `docker-compose images` to crash when using - untagged images - -- Fixed a bug where the valid `${VAR:-}` syntax would cause Compose to - error out - -- Fixed a bug where `env_file` entries using an UTF-8 BOM were being read - incorrectly - -- Fixed a bug where missing secret files would generate an empty directory - in their place - -- Fixed character encoding issues in the CLI's error handlers - -- Added validation for the `test` field in healthchecks - -- Added validation for the `subnet` field in IPAM configurations - -- Added validation for `volumes` properties when using the long syntax in - service definitions - -- The CLI now explicit prevents using `-d` and `--timeout` together - in `docker-compose up` - -## 1.17.0 - -(2017-11-01) - -### New features - -#### Compose file version 3.4 - -- Introduced version 3.4 of the `docker-compose.yml` specification. - This version requires to be used with Docker Engine 17.06.0 or above. - -- Added support for `cache_from`, `network` and `target` options in build - configurations - -- Added support for the `order` parameter in the `update_config` section - -- Added support for setting a custom name in volume definitions using - the `name` parameter - -#### Compose file version 2.3 - -- Added support for `shm_size` option in build configuration - -#### Compose file version 2.x - -- Added support for extension fields (`x-*`). Also available for v3.4 files - -#### All formats - -- Added new `--no-start` to the `up` command, allowing users to create all - resources (networks, volumes, containers) without starting services. 
- The `create` command is deprecated in favor of this new option - -### Bug fixes - -- Fixed a bug where `extra_hosts` values would be overridden by extension - files instead of merging together - -- Fixed a bug where the validation for v3.2 files would prevent using the - `consistency` field in service volume definitions - -- Fixed a bug that would cause a crash when configuration fields expecting - unique items would contain duplicates - -- Fixed a bug where mount overrides with a different mode would create a - duplicate entry instead of overriding the original entry - -- Fixed a bug where build labels declared as a list wouldn't be properly - parsed - -- Fixed a bug where the output of `docker-compose config` would be invalid - for some versions if the file contained custom-named external volumes - -- Improved error handling when issuing a build command on Windows using an - unsupported file version - -- Fixed an issue where networks with identical names would sometimes be - created when running `up` commands concurrently. - -## 1.16.0 - -(2017-08-31) - -### New features - -#### Compose file version 2.3 - -- Introduced version 2.3 of the `docker-compose.yml` specification. - This version requires to be used with Docker Engine 17.06.0 or above. - -- Added support for the `target` parameter in build configurations - -- Added support for the `start_period` parameter in healthcheck - configurations - -#### Compose file version 2.x - -- Added support for the `blkio_config` parameter in service definitions - -- Added support for setting a custom name in volume definitions using - the `name` parameter (not available for version 2.0) - -#### All formats - -- Added new CLI flag `--no-ansi` to suppress ANSI control characters in - output - -### Bug fixes - -- Fixed a bug where nested `extends` instructions weren't resolved - properly, causing "file not found" errors - -- Fixed several issues with `.dockerignore` parsing - -- Fixed issues where logs of TTY-enabled services were being printed - incorrectly and causing `MemoryError` exceptions - -- Fixed a bug where printing application logs would sometimes be interrupted - by a `UnicodeEncodeError` exception on Python 3 - -- The `$` character in the output of `docker-compose config` is now - properly escaped - -- Fixed a bug where running `docker-compose top` would sometimes fail - with an uncaught exception - -- Fixed a bug where `docker-compose pull` with the `--parallel` flag - would return a `0` exit code when failing - -- Fixed an issue where keys in `deploy.resources` were not being validated - -- Fixed an issue where the `logging` options in the output of - `docker-compose config` would be set to `null`, an invalid value - -- Fixed the output of the `docker-compose images` command when an image - would come from a private repository using an explicit port number - -- Fixed the output of `docker-compose config` when a port definition used - `0` as the value for the published port - -## 1.15.0 - -(2017-07-26) - -### New features - -#### Compose file version 2.2 - -- Added support for the `network` parameter in build configurations. - -#### Compose file version 2.1 and up - -- The `pid` option in a service's definition now supports a `service:` - value. - -- Added support for the `storage_opt` parameter in service definitions. 
- This option is not available for the v3 format - -#### All formats - -- Added `--quiet` flag to `docker-compose pull`, suppressing progress output - -- Some improvements to CLI output - -### Bug fixes - -- Volumes specified through the `--volume` flag of `docker-compose run` now - complement volumes declared in the service's definition instead of replacing - them - -- Fixed a bug where using multiple Compose files would unset the scale value - defined inside the Compose file. - -- Fixed an issue where the `credHelpers` entries in the `config.json` file - were not being honored by Compose - -- Fixed a bug where using multiple Compose files with port declarations - would cause failures in Python 3 environments - -- Fixed a bug where some proxy-related options present in the user's - environment would prevent Compose from running - -- Fixed an issue where the output of `docker-compose config` would be invalid - if the original file used `Y` or `N` values - -- Fixed an issue preventing `up` operations on a previously created stack on - Windows Engine. - -## 1.14.0 - -(2017-06-19) - -### New features - -#### Compose file version 3.3 - -- Introduced version 3.3 of the `docker-compose.yml` specification. - This version requires to be used with Docker Engine 17.06.0 or above. - Note: the `credential_spec` and `configs` keys only apply to Swarm services - and will be ignored by Compose - -#### Compose file version 2.2 - -- Added the following parameters in service definitions: `cpu_count`, - `cpu_percent`, `cpus` - -#### Compose file version 2.1 - -- Added support for build labels. This feature is also available in the - 2.2 and 3.3 formats. - -#### All formats - -- Added shorthand `-u` for `--user` flag in `docker-compose exec` - -- Differences in labels between the Compose file and remote network - will now print a warning instead of preventing redeployment. - -### Bug fixes - -- Fixed a bug where service's dependencies were being rescaled to their - default scale when running a `docker-compose run` command - -- Fixed a bug where `docker-compose rm` with the `--stop` flag was not - behaving properly when provided with a list of services to remove - -- Fixed a bug where `cache_from` in the build section would be ignored when - using more than one Compose file. - -- Fixed a bug that prevented binding the same port to different IPs when - using more than one Compose file. - -- Fixed a bug where override files would not be picked up by Compose if they - had the `.yaml` extension - -- Fixed a bug on Windows Engine where networks would be incorrectly flagged - for recreation - -- Fixed a bug where services declaring ports would cause crashes on some - versions of Python 3 - -- Fixed a bug where the output of `docker-compose config` would sometimes - contain invalid port definitions - -## 1.13.0 - -(2017-05-02) - -### Breaking changes - -- `docker-compose up` now resets a service's scaling to its default value. - You can use the newly introduced `--scale` option to specify a custom - scale value - -### New features - -#### Compose file version 2.2 - -- Introduced version 2.2 of the `docker-compose.yml` specification. This - version requires to be used with Docker Engine 1.13.0 or above - -- Added support for `init` in service definitions. - -- Added support for `scale` in service definitions. The configuration's value - can be overridden using the `--scale` flag in `docker-compose up`. 
- The `scale` command is disabled for this file format - -#### Compose file version 2.x - -- Added support for `options` in the `ipam` section of network definitions - -### Bug fixes - -- Fixed a bug where paths provided to compose via the `-f` option were not - being resolved properly - -- Fixed a bug where the `ext_ip::target_port` notation in the ports section - was incorrectly marked as invalid - -- Fixed an issue where the `exec` command would sometimes not return control - to the terminal when using the `-d` flag - -- Fixed a bug where secrets were missing from the output of the `config` - command for v3.2 files - -- Fixed an issue where `docker-compose` would hang if no internet connection - was available - -- Fixed an issue where paths containing unicode characters passed via the `-f` - flag were causing Compose to crash - -- Fixed an issue where the output of `docker-compose config` would be invalid - if the Compose file contained external secrets - -- Fixed a bug where using `--exit-code-from` with `up` would fail if Compose - was installed in a Python 3 environment - -- Fixed a bug where recreating containers using a combination of `tmpfs` and - `volumes` would result in an invalid config state - -## 1.12.0 - -(2017-04-04) - -### New features - -#### Compose file version 3.2 - -- Introduced version 3.2 of the `docker-compose.yml` specification - -- Added support for `cache_from` in the `build` section of services - -- Added support for the new expanded ports syntax in service definitions - -- Added support for the new expanded volumes syntax in service definitions - -#### Compose file version 2.1 - -- Added support for `pids_limit` in service definitions - -#### Compose file version 2.0 and up - -- Added `--volumes` option to `docker-compose config` that lists named - volumes declared for that project - -- Added support for `mem_reservation` in service definitions (2.x only) - -- Added support for `dns_opt` in service definitions (2.x only) - -#### All formats - -- Added a new `docker-compose images` command that lists images used by - the current project's containers - -- Added a `--stop` (shorthand `-s`) option to `docker-compose rm` that stops - the running containers before removing them - -- Added a `--resolve-image-digests` option to `docker-compose config` that - pins the image version for each service to a permanent digest - -- Added a `--exit-code-from SERVICE` option to `docker-compose up`. When - used, `docker-compose` will exit on any container's exit with the code - corresponding to the specified service's exit code - -- Added a `--parallel` option to `docker-compose pull` that enables images - for multiple services to be pulled simultaneously - -- Added a `--build-arg` option to `docker-compose build` - -- Added a `--volume ` (shorthand `-v`) option to - `docker-compose run` to declare runtime volumes to be mounted - -- Added a `--project-directory PATH` option to `docker-compose` that will - affect path resolution for the project - -- When using `--abort-on-container-exit` in `docker-compose up`, the exit - code for the container that caused the abort will be the exit code of - the `docker-compose up` command - -- Users can now configure which path separator character they want to use - to separate the `COMPOSE_FILE` environment value using the - `COMPOSE_PATH_SEPARATOR` environment variable - -- Added support for port range to a single port in port mappings, such as - `8000-8010:80`. 
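A minimal sketch of the port-range mapping support added in 1.12.0 above; the service name and image are placeholders:

```yaml
version: "2.1"
services:
  web:
    image: nginx:alpine
    ports:
      - "8000-8010:80"   # publish container port 80 on a host port taken from 8000-8010
```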
- -### Bug fixes - -- `docker-compose run --rm` now removes anonymous volumes after execution, - matching the behavior of `docker run --rm`. - -- Fixed a bug where override files containing port lists would cause a - TypeError to be raised - -- Fixed a bug where the `deploy` key would be missing from the output of - `docker-compose config` - -- Fixed a bug where scaling services up or down would sometimes re-use - obsolete containers - -- Fixed a bug where the output of `docker-compose config` would be invalid - if the project declared anonymous volumes - -- Variable interpolation now properly occurs in the `secrets` section of - the Compose file - -- The `secrets` section now properly appears in the output of - `docker-compose config` - -- Fixed a bug where changes to some networks properties would not be - detected against previously created networks - -- Fixed a bug where `docker-compose` would crash when trying to write into - a closed pipe - -- Fixed an issue where Compose would not pick up on the value of - COMPOSE_TLS_VERSION when used in combination with command-line TLS flags - -## 1.11.2 - -(2017-02-17) - -### Bug fixes - -- Fixed a bug that was preventing secrets configuration from being - loaded properly - -- Fixed a bug where the `docker-compose config` command would fail - if the config file contained secrets definitions - -- Fixed an issue where Compose on some linux distributions would - pick up and load an outdated version of the requests library - -- Fixed an issue where socket-type files inside a build folder - would cause `docker-compose` to crash when trying to build that - service - -- Fixed an issue where recursive wildcard patterns `**` were not being - recognized in `.dockerignore` files. - -## 1.11.1 - -(2017-02-09) - -### Bug fixes - -- Fixed a bug where the 3.1 file format was not being recognized as valid - by the Compose parser - -## 1.11.0 - -(2017-02-08) - -### New Features - -#### Compose file version 3.1 - -- Introduced version 3.1 of the `docker-compose.yml` specification. This - version requires Docker Engine 1.13.0 or above. It introduces support - for secrets. See the documentation for more information - -#### Compose file version 2.0 and up - -- Introduced the `docker-compose top` command that displays processes running - for the different services managed by Compose. - -### Bug fixes - -- Fixed a bug where extending a service defining a healthcheck dictionary - would cause `docker-compose` to error out. - -- Fixed an issue where the `pid` entry in a service definition was being - ignored when using multiple Compose files. - -## 1.10.1 - -(2017-02-01) - -### Bug fixes - -- Fixed an issue where the presence of older versions of the docker-py - package would cause unexpected crashes while running Compose - -- Fixed an issue where healthcheck dependencies would be lost when - using multiple compose files for a project - -- Fixed a few issues that made the output of the `config` command - invalid - -- Fixed an issue where adding volume labels to v3 Compose files would - result in an error - -- Fixed an issue on Windows where build context paths containing unicode - characters were being improperly encoded - -- Fixed a bug where Compose would occasionally crash while streaming logs - when containers would stop or restart - -## 1.10.0 - -(2017-01-18) - -### New Features - -#### Compose file version 3.0 - -- Introduced version 3.0 of the `docker-compose.yml` specification. 
This - version requires to be used with Docker Engine 1.13 or above and is - specifically designed to work with the `docker stack` commands. - -#### Compose file version 2.1 and up - -- Healthcheck configuration can now be done in the service definition using - the `healthcheck` parameter - -- Containers dependencies can now be set up to wait on positive healthchecks - when declared using `depends_on`. See the documentation for the updated - syntax. - - > [!NOTE] - > - > This feature will not be ported to version 3 Compose files. - -- Added support for the `sysctls` parameter in service definitions - -- Added support for the `userns_mode` parameter in service definitions - -- Compose now adds identifying labels to networks and volumes it creates - -#### Compose file version 2.0 and up - -- Added support for the `stop_grace_period` option in service definitions. - -### Bug fixes - -- Colored output now works properly on Windows. - -- Fixed a bug where docker-compose run would fail to set up link aliases - in interactive mode on Windows. - -- Networks created by Compose are now always made attachable - (Compose files v2.1 and up). - -- Fixed a bug where falsy values of `COMPOSE_CONVERT_WINDOWS_PATHS` - (`0`, `false`, empty value) were being interpreted as true. - -- Fixed a bug where forward slashes in some .dockerignore patterns weren't - being parsed correctly on Windows - -## 1.9.0 - -(2016-11-16) - -**Breaking changes** - -- When using Compose with Docker Toolbox/Machine on Windows, volume paths are - no longer converted from `C:\Users` to `/c/Users`-style by default. To - re-enable this conversion so that your volumes keep working, set the - environment variable `COMPOSE_CONVERT_WINDOWS_PATHS=1`. Users of - Docker for Windows are not affected and do not need to set the variable. - -### New Features - -- Interactive mode for `docker-compose run` and `docker-compose exec` is - now supported on Windows platforms. The `docker` binary - is required to be present on the system for this feature to work. - -- Introduced version 2.1 of the `docker-compose.yml` specification. This - version requires to be used with Docker Engine 1.12 or above. - - Added support for setting volume labels and network labels in - `docker-compose.yml`. - - Added support for the `isolation` parameter in service definitions. - - Added support for link-local IPs in the service networks definitions. - - Added support for shell-style inline defaults in variable interpolation. - The supported forms are `${FOO-default}` (fall back if FOO is unset) and - `${FOO:-default}` (fall back if FOO is unset or empty). - -- Added support for the `group_add` and `oom_score_adj` parameters in - service definitions. - -- Added support for the `internal` and `enable_ipv6` parameters in network - definitions. - -- Compose now defaults to using the `npipe` protocol on Windows. - -- Overriding a `logging` configuration will now properly merge the `options` - mappings if the `driver` values do not conflict. - -### Bug fixes - -- Fixed several bugs related to `npipe` protocol support on Windows. - -- Fixed an issue with Windows paths being incorrectly converted when - using Docker on Windows Server. - -- Fixed a bug where an empty `restart` value would sometimes result in an - exception being raised. - -- Fixed an issue where service logs containing unicode characters would - sometimes cause an error to occur. - -- Fixed a bug where unicode values in environment variables would sometimes - raise a unicode exception when retrieved. 
- -- Fixed an issue where Compose would incorrectly detect a configuration - mismatch for overlay networks. - -## 1.8.1 - -(2016-09-22) - -### Bug fixes - -- Fixed a bug where users using a credentials store were not able - to access their private images. - -- Fixed a bug where users using identity tokens to authenticate - were not able to access their private images. - -- Fixed a bug where an `HttpHeaders` entry in the docker configuration - file would cause Compose to crash when trying to build an image. - -- Fixed a few bugs related to the handling of Windows paths in volume - binding declarations. - -- Fixed a bug where Compose would sometimes crash while trying to - read a streaming response from the engine. - -- Fixed an issue where Compose would crash when encountering an API error - while streaming container logs. - -- Fixed an issue where Compose would erroneously try to output logs from - drivers not handled by the Engine's API. - -- Fixed a bug where options from the `docker-machine config` command would - not be properly interpreted by Compose. - -- Fixed a bug where the connection to the Docker Engine would - sometimes fail when running a large number of services simultaneously. - -- Fixed an issue where Compose would sometimes print a misleading - suggestion message when running the `bundle` command. - -- Fixed a bug where connection errors would not be handled properly by - Compose during the project initialization phase. - -- Fixed a bug where a misleading error would appear when encountering - a connection timeout. - -## 1.8.0 - -(2016-06-14) - -### Breaking Changes - -- As announced in 1.7.0, `docker-compose rm` now removes containers - created by `docker-compose run` by default. - -- Setting `entrypoint` on a service now empties out any default - command that was set on the image (i.e. any `CMD` instruction in the - Dockerfile used to build it). This makes it consistent with - the `--entrypoint` flag to `docker run`. - -### New Features - -- Added `docker-compose bundle`, a command that builds a bundle file - to be consumed by the new *Docker Stack* commands in Docker 1.12. - -- Added `docker-compose push`, a command that pushes service images - to a registry. - -- Compose now supports specifying a custom TLS version for - interaction with the Docker Engine using the `COMPOSE_TLS_VERSION` - environment variable. - -### Bug fixes - -- Fixed a bug where Compose would erroneously try to read `.env` - at the project's root when it is a directory. - -- `docker-compose run -e VAR` now passes `VAR` through from the shell - to the container, as with `docker run -e VAR`. - -- Improved config merging when multiple compose files are involved - for several service sub-keys. - -- Fixed a bug where volume mappings containing Windows drives would - sometimes be parsed incorrectly. - -- Fixed a bug in Windows environment where volume mappings of the - host's root directory would be parsed incorrectly. - -- Fixed a bug where `docker-compose config` would output an invalid - Compose file if external networks were specified. - -- Fixed an issue where unset buildargs would be assigned a string - containing `'None'` instead of the expected empty value. - -- Fixed a bug where yes/no prompts on Windows would not show before - receiving input. - -- Fixed a bug where trying to `docker-compose exec` on Windows - without the `-d` option would exit with a stacktrace. This will - still fail for the time being, but should do so gracefully. 
- -- Fixed a bug where errors during `docker-compose up` would show - an unrelated stacktrace at the end of the process. - -- `docker-compose create` and `docker-compose start` show more - descriptive error messages when something goes wrong. - -## 1.7.1 - -(2016-05-04) - -### Bug fixes - -- Fixed a bug where the output of `docker-compose config` for v1 files - would be an invalid configuration file. - -- Fixed a bug where `docker-compose config` would not check the validity - of links. - -- Fixed an issue where `docker-compose help` would not output a list of - available commands and generic options as expected. - -- Fixed an issue where filtering by service when using `docker-compose logs` - would not apply for newly created services. - -- Fixed a bug where unchanged services would sometimes be recreated in - the up phase when using Compose with Python 3. - -- Fixed an issue where API errors encountered during the up phase would - not be recognized as a failure state by Compose. - -- Fixed a bug where Compose would raise a NameError because of an undefined - exception name on non-Windows platforms. - -- Fixed a bug where the wrong version of `docker-py` would sometimes be - installed alongside Compose. - -- Fixed a bug where the host value output by `docker-machine config default` - would not be recognized as valid options by the `docker-compose` - command line. - -- Fixed an issue where Compose would sometimes exit unexpectedly while - reading events broadcasted by a Swarm cluster. - -- Corrected a statement in the docs about the location of the `.env` file, - which is indeed read from the current directory, instead of in the same - location as the Compose file. - -## 1.7.0 - -(2016-04-13) - -### Breaking Changes - -- `docker-compose logs` no longer follows log output by default. It now - matches the behavior of `docker logs` and exits after the current logs - are printed. Use `-f` to get the old default behavior. - -- Booleans are no longer allowed as values for mappings in the Compose file - (for keys `environment`, `labels` and `extra_hosts`). Previously this - was a warning. Boolean values should be quoted so they become string values. - -### New Features - -- Compose now looks for a `.env` file in the directory where it's run and - reads any environment variables defined inside, if they're not already - set in the shell environment. This lets you easily set defaults for - variables used in the Compose file, or for any of the `COMPOSE_*` or - `DOCKER_*` variables. - -- Added a `--remove-orphans` flag to both `docker-compose up` and - `docker-compose down` to remove containers for services that were removed - from the Compose file. - -- Added a `--all` flag to `docker-compose rm` to include containers created - by `docker-compose run`. This will become the default behavior in the next - version of Compose. - -- Added support for all the same TLS configuration flags used by the `docker` - client: `--tls`, `--tlscert`, `--tlskey`, etc. - -- Compose files now support the `tmpfs` and `shm_size` options. - -- Added the `--workdir` flag to `docker-compose run` - -- `docker-compose logs` now shows logs for new containers that are created - after it starts. - -- The `COMPOSE_FILE` environment variable can now contain multiple files, - separated by the host system's standard path separator (`:` on Mac/Linux, - `;` on Windows). - -- You can now specify a static IP address when connecting a service to a - network with the `ipv4_address` and `ipv6_address` options.
- -- Added `--follow`, `--timestamp`, and `--tail` flags to the - `docker-compose logs` command. - -- `docker-compose up`, and `docker-compose start` will now start containers - in parallel where possible. - -- `docker-compose stop` now stops containers in reverse dependency order - instead of all at once. - -- Added the `--build` flag to `docker-compose up` to force it to build a new - image. It now shows a warning if an image is automatically built when the - flag is not used. - -- Added the `docker-compose exec` command for executing a process in a running - container. - - -### Bug fixes - -- `docker-compose down` now removes containers created by - `docker-compose run`. - -- A more appropriate error is shown when a timeout is hit during `up` when - using a tty. - -- Fixed a bug in `docker-compose down` where it would abort if some resources - had already been removed. - -- Fixed a bug where changes to network aliases would not trigger a service - to be recreated. - -- Fix a bug where a log message was printed about creating a new volume - when it already existed. - -- Fixed a bug where interrupting `up` would not always shut down containers. - -- Fixed a bug where `log_opt` and `log_driver` were not properly carried over - when extending services in the v1 Compose file format. - -- Fixed a bug where empty values for build args would cause file validation - to fail. - -## 1.6.2 - -(2016-02-23) - -- Fixed a bug where connecting to a TLS-enabled Docker Engine would fail with - a certificate verification error. - -## 1.6.1 - -(2016-02-23) - -### Bug fixes - -- Fixed a bug where recreating a container multiple times would cause the - new container to be started without the previous volumes. - -- Fixed a bug where Compose would set the value of unset environment variables - to an empty string, instead of a key without a value. - -- Provide a better error message when Compose requires a more recent version - of the Docker API. - -- Add a missing config field `network.aliases` which allows setting a network - scoped alias for a service. - -- Fixed a bug where `run` would not start services listed in `depends_on`. - -- Fixed a bug where `networks` and `network_mode` where not merged when using - extends or multiple Compose files. - -- Fixed a bug with service aliases where the short container id alias was - only contained 10 characters, instead of the 12 characters used in previous - versions. - -- Added a missing log message when creating a new named volume. - -- Fixed a bug where `build.args` was not merged when using `extends` or - multiple Compose files. - -- Fixed some bugs with config validation when null values or incorrect types - were used instead of a mapping. - -- Fixed a bug where a `build` section without a `context` would show a stack - trace instead of a helpful validation message. - -- Improved compatibility with swarm by only setting a container affinity to - the previous instance of a services' container when the service uses an - anonymous container volume. Previously the affinity was always set on all - containers. - -- Fixed the validation of some `driver_opts` would cause an error if a number - was used instead of a string. - -- Some improvements to the `run.sh` script used by the Compose container install - option. - -- Fixed a bug with `up --abort-on-container-exit` where Compose would exit, - but would not stop other containers. - -- Corrected the warning message that is printed when a boolean value is used - as a value in a mapping. 
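To illustrate the static IP address option from 1.7.0 above, a minimal sketch; the network name, subnet, and address are made up for the example:

```yaml
version: "2"
services:
  app:
    image: busybox
    command: top
    networks:
      appnet:
        ipv4_address: 172.28.1.10   # fixed address on the user-defined network
networks:
  appnet:
    ipam:
      config:
        - subnet: 172.28.0.0/16
```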
- -## 1.6.0 - -(2016-01-15) - -### Major Features - -- Compose 1.6 introduces a new format for `docker-compose.yml` which lets - you define networks and volumes in the Compose file as well as services. It - also makes a few changes to the structure of some configuration options. - - You don't have to use it - your existing Compose files will run on Compose - 1.6 exactly as they do today. - - Check the [upgrade guide](/reference/compose-file/legacy-versions.md) - for full details. - -- Support for networking has exited experimental status and is the recommended - way to enable communication between containers. - - If you use the new file format, your app will use networking. If you aren't - ready yet, just leave your Compose file as it is and it'll continue to work - just the same. - - By default, you don't have to configure any networks. In fact, using - networking with Compose involves even less configuration than using links. - Consult the [networking guide](/manuals/compose/how-tos/networking.md) for how to use it. - - The experimental flags `--x-networking` and `--x-network-driver`, introduced - in Compose 1.5, have been removed. - -- You can now pass arguments to a build if you're using the new file format: - - build: - context: . - args: - buildno: 1 - -- You can now specify both a `build` and an `image` key if you're using the - new file format. `docker-compose build` will build the image and tag it with - the name you've specified, while `docker-compose pull` will attempt to pull - it. - -- There's a new `events` command for monitoring container events from - the application, much like `docker events`. This is a good primitive for - building tools on top of Compose for performing actions when particular - things happen, such as containers starting and stopping. - -- There's a new `depends_on` option for specifying dependencies between - services. This enforces the order of startup, and ensures that when you run - `docker-compose up SERVICE` on a service with dependencies, those are started - as well. - -### New Features - -- Added a new command `config` which validates and prints the Compose - configuration after interpolating variables, resolving relative paths, and - merging multiple files and `extends`. - -- Added a new command `create` for creating containers without starting them. - -- Added a new command `down` to stop and remove all the resources created by - `up` in a single command. - -- Added support for the `cpu_quota` configuration option. - -- Added support for the `stop_signal` configuration option. - -- Commands `start`, `restart`, `pause`, and `unpause` now exit with an - error status code if no containers were modified. - -- Added a new `--abort-on-container-exit` flag to `up` which causes `up` to - stop all container and exit once the first container exits. - -- Removed support for `FIG_FILE`, `FIG_PROJECT_NAME`, and no longer reads - `fig.yml` as a default Compose file location. - -- Removed the `migrate-to-labels` command. - -- Removed the `--allow-insecure-ssl` flag. - -### Bug fixes - -- Fixed a validation bug that prevented the use of a range of ports in - the `expose` field. - -- Fixed a validation bug that prevented the use of arrays in the `entrypoint` - field if they contained duplicate entries. - -- Fixed a bug that caused `ulimits` to be ignored when used with `extends`. - -- Fixed a bug that prevented ipv6 addresses in `extra_hosts`. - -- Fixed a bug that caused `extends` to be ignored when included from - multiple Compose files. 
- -- Fixed an incorrect warning when a container volume was defined in - the Compose file. - -- Fixed a bug that prevented the force shutdown behavior of `up` and - `logs`. - -- Fixed a bug that caused `None` to be printed as the network driver name - when the default network driver was used. - -- Fixed a bug where using the string form of `dns` or `dns_search` would - cause an error. - -- Fixed a bug where a container would be reported as "Up" when it was - in the restarting state. - -- Fixed a confusing error message when DOCKER_CERT_PATH was not set properly. - -- Fixed a bug where attaching to a container would fail if it was using a - non-standard logging driver (or none at all). - -## 1.5.2 - -(2015-12-03) - -- Fixed a bug which broke the use of `environment` and `env_file` with - `extends`, and caused environment keys without values to have a `None` - value, instead of a value from the host environment. - -- Fixed a regression in 1.5.1 that caused a warning about volumes to be - raised incorrectly when containers were recreated. - -- Fixed a bug which prevented building a `Dockerfile` that used `ADD ` - -- Fixed a bug with `docker-compose restart` which prevented it from - starting stopped containers. - -- Fixed handling of SIGTERM and SIGINT to properly stop containers - -- Add support for using a url as the value of `build` - -- Improved the validation of the `expose` option - -## 1.5.1 - -(2015-11-12) - -- Add the `--force-rm` option to `build`. - -- Add the `ulimit` option for services in the Compose file. - -- Fixed a bug where `up` would error with "service needs to be built" if - a service changed from using `image` to using `build`. - -- Fixed a bug that would cause incorrect output of parallel operations - on some terminals. - -- Fixed a bug that prevented a container from being recreated when the - mode of a `volumes_from` was changed. - -- Fixed a regression in 1.5.0 where non-utf-8 unicode characters would cause - `up` or `logs` to crash. - -- Fixed a regression in 1.5.0 where Compose would use a success exit status - code when a command fails due to an HTTP timeout communicating with the - docker daemon. - -- Fixed a regression in 1.5.0 where `name` was being accepted as a valid - service option which would override the actual name of the service. - -- When using `--x-networking` Compose no longer sets the hostname to the - container name. - -- When using `--x-networking` Compose will only create the default network - if at least one container is using the network. - -- When printings logs during `up` or `logs`, flush the output buffer after - each line to prevent buffering issues from hiding logs. - -- Recreate a container if one of its dependencies is being created. - Previously a container was only recreated if it's dependencies already - existed, but were being recreated as well. - -- Add a warning when a `volume` in the Compose file is being ignored - and masked by a container volume from a previous container. - -- Improve the output of `pull` when run without a tty. - -- When using multiple Compose files, validate each before attempting to merge - them together. Previously invalid files would result in not helpful errors. - -- Allow dashes in keys in the `environment` service option. - -- Improve validation error messages by including the filename as part of the - error message. 
- -## 1.5.0 - -(2015-11-03) - -### Breaking changes - -With the introduction of variable substitution support in the Compose file, any -Compose file that uses an environment variable (`$VAR` or `${VAR}`) in the `command:` -or `entrypoint:` field will break. - -Previously these values were interpolated inside the container, with a value -from the container environment. In Compose 1.5.0, the values will be -interpolated on the host, with a value from the host environment. - -To migrate a Compose file to 1.5.0, escape the variables with an extra `$` -(ex: `$$VAR` or `$${VAR}`). See -https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-substitution - -### Major features - -- Compose is now available for Windows. - -- Environment variables can be used in the Compose file. See - https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-substitution - -- Multiple compose files can be specified, allowing you to override - settings in the default Compose file. See - https://github.com/docker/compose/blob/8cc8e61/docs/reference/docker-compose.md - for more details. - -- Compose now produces better error messages when a file contains - invalid configuration. - -- `up` now waits for all services to exit before shutting down, - rather than shutting down as soon as one container exits. - -- Experimental support for the new docker networking system can be - enabled with the `--x-networking` flag. Read more here: - https://github.com/docker/docker/blob/8fee1c20/docs/userguide/dockernetworks.md - -### New features - -- You can now optionally pass a mode to `volumes_from`. For example, - `volumes_from: ["servicename:ro"]`. - -- Since Docker now lets you create volumes with names, you can refer to those - volumes by name in `docker-compose.yml`. For example, - `volumes: ["mydatavolume:/data"]` will mount the volume named - `mydatavolume` at the path `/data` inside the container. - - If the first component of an entry in `volumes` starts with a `.`, `/` or `~`, - it is treated as a path and expansion of relative paths is performed as - necessary. Otherwise, it is treated as a volume name and passed straight - through to Docker. - - Read more on named volumes and volume drivers here: - https://github.com/docker/docker/blob/244d9c33/docs/userguide/dockervolumes.md - -- `docker-compose build --pull` instructs Compose to pull the base image for - each Dockerfile before building. - -- `docker-compose pull --ignore-pull-failures` instructs Compose to continue - if it fails to pull a single service's image, rather than aborting. - -- You can now specify an IPC namespace in `docker-compose.yml` with the `ipc` - option. - -- Containers created by `docker-compose run` can now be named with the - `--name` flag. - -- If you install Compose with pip or use it as a library, it now works with - Python 3. - -- `image` now supports image digests (in addition to ids and tags). For example, - `image: "busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d"` - -- `ports` now supports ranges of ports. For example, - - ports: - - "3000-3005" - - "9000-9001:8000-8001" - -- `docker-compose run` now supports a `-p|--publish` parameter, much like - `docker run -p`, for publishing specific ports to the host. - -- `docker-compose pause` and `docker-compose unpause` have been implemented, - analogous to `docker pause` and `docker unpause`. - -- When using `extends` to copy configuration from another service in the same - Compose file, you can omit the `file` option. 
- -- Compose can be installed and run as a Docker image. This is an experimental - feature. - -### Bug fixes - -- All values for the `log_driver` option which are supported by the Docker - daemon are now supported by Compose. - -- `docker-compose build` can now be run successfully against a Swarm cluster. - -## 1.4.2 - -(2015-09-22) - -- Fixed a regression in the 1.4.1 release that would cause `docker-compose up` - without the `-d` option to exit immediately. - -## 1.4.1 - -(2015-09-10) - -### Bug fixes - -- Some configuration changes (notably changes to `links`, `volumes_from`, and - `net`) were not properly triggering a container recreate as part of - `docker-compose up`. -- `docker-compose up ` was showing logs for all services instead of - just the specified services. -- Containers with custom container names were showing up in logs as - `service_number` instead of their custom container name. -- When scaling a service sometimes containers would be recreated even when - the configuration had not changed. - - -## 1.4.0 - -(2015-08-04) - -- By default, `docker-compose up` now only recreates containers for services whose configuration has changed since they were created. This should result in a dramatic speed-up for many applications. - - The experimental `--x-smart-recreate` flag which introduced this feature in Compose 1.3.0 has been removed, and a `--force-recreate` flag has been added for when you want to recreate everything. - -- Several of Compose's commands - `scale`, `stop`, `kill` and `rm` - now perform actions on multiple containers in parallel, rather than in sequence, which will run much faster on larger applications. - -- You can now specify a custom name for a service's container with `container_name`. Because Docker container names must be unique, this means you can't scale the service beyond one container. - -- You no longer have to specify a `file` option when using `extends` - it will default to the current file. - -- Service names can now contain dots, dashes and underscores. - -- Compose can now read YAML configuration from standard input, rather than from a file, by specifying `-` as the filename. This makes it easier to generate configuration dynamically: - - $ echo 'redis: {"image": "redis"}' | docker-compose --file - up - -- There's a new `docker-compose version` command which prints extended information about Compose's bundled dependencies. - -- `docker-compose.yml` now supports `log_opt` as well as `log_driver`, allowing you to pass extra configuration to a service's logging driver. - -- `docker-compose.yml` now supports `memswap_limit`, similar to `docker run --memory-swap`. - -- When mounting volumes with the `volumes` option, you can now pass in any mode supported by the daemon, not just `:ro` or `:rw`. For example, SELinux users can pass `:z` or `:Z`. - -- You can now specify a custom volume driver with the `volume_driver` option in `docker-compose.yml`, much like `docker run --volume-driver`. - -- A bug has been fixed where Compose would fail to pull images from private registries serving plain (unsecured) HTTP. The `--allow-insecure-ssl` flag, which was previously used to work around this issue, has been deprecated and now has no effect. - -- A bug has been fixed where `docker-compose build` would fail if the build depended on a private Hub image or an image from a private registry. - -- A bug has been fixed where Compose would crash if there were containers which the Docker daemon had not finished removing. 
- -- Two bugs have been fixed where Compose would sometimes fail with a "Duplicate bind mount" error, or fail to attach volumes to a container, if there was a volume path specified in `docker-compose.yml` with a trailing slash. - -Thanks @mnowster, @dnephin, @ekristen, @funkyfuture, @jeffk and @lukemarsden! - -## 1.3.3 - -(2015-07-15) - -### Regression fixes - -- When stopping containers gracefully, Compose was setting the timeout to 0, effectively forcing a SIGKILL every time. -- Compose would sometimes crash depending on the formatting of container data returned from the Docker API. - -## 1.3.2 - -(2015-07-14) - -### Bug fixes - -- When there were one-off containers created by running `docker-compose run` on an older version of Compose, `docker-compose run` would fail with a name collision. Compose now shows an error if you have leftover containers of this type lying around, and tells you how to remove them. -- Compose was not reading Docker authentication config files created in the new location, `~/docker/config.json`, and authentication against private registries would therefore fail. -- When a container had a pseudo-TTY attached, its output in `docker-compose up` would be truncated. -- `docker-compose up --x-smart-recreate` would sometimes fail when an image tag was updated. -- `docker-compose up` would sometimes create two containers with the same numeric suffix. -- `docker-compose rm` and `docker-compose ps` would sometimes list services that aren't part of the current project (though no containers were erroneously removed). -- Some `docker-compose` commands would not show an error if invalid service names were passed in. - -Thanks @dano, @josephpage, @kevinsimper, @lieryan, @phemmer, @soulrebel and @sschepens! - -## 1.3.1 - -(2015-06-21) - -### Bug fixes - -- `docker-compose build` would always attempt to pull the base image before building. -- `docker-compose help migrate-to-labels` failed with an error. -- If no network mode was specified, Compose would set it to "bridge", rather than allowing the Docker daemon to use its configured default network mode. - -## 1.3.0 - -(2015-06-18) - -### Important notes - -- **This release contains breaking changes, and you will need to either remove or migrate your existing containers before running your app** - see the [upgrading section of the install docs](https://github.com/docker/compose/blob/1.3.0rc1/docs/install.md#upgrading) for details. - -- Compose now requires Docker 1.6.0 or later. - -### Improvements - -- Compose now uses container labels, rather than names, to keep track of containers. This makes Compose both faster and easier to integrate with your own tools. - -- Compose no longer uses "intermediate containers" when recreating containers for a service. This makes `docker-compose up` less complex and more resilient to failure. - -### New features - -- `docker-compose up` has an **experimental** new behavior: it will only recreate containers for services whose configuration has changed in `docker-compose.yml`. This will eventually become the default, but for now you can take it for a spin: - - $ docker-compose up --x-smart-recreate - -- When invoked in a subdirectory of a project, `docker-compose` will now climb up through parent directories until it finds a `docker-compose.yml`. - -Several new configuration keys have been added to `docker-compose.yml`: - -- `dockerfile`, like `docker build --file`, lets you specify an alternate Dockerfile to use with `build`. 
-- `labels`, like `docker run --labels`, lets you add custom metadata to containers. -- `extra_hosts`, like `docker run --add-host`, lets you add entries to a container's `/etc/hosts` file. -- `pid: host`, like `docker run --pid=host`, lets you reuse the same PID namespace as the host machine. -- `cpuset`, like `docker run --cpuset-cpus`, lets you specify which CPUs to allow execution in. -- `read_only`, like `docker run --read-only`, lets you mount a container's filesystem as read-only. -- `security_opt`, like `docker run --security-opt`, lets you specify [security options](/reference/cli/docker/container/run/#security-opt). -- `log_driver`, like `docker run --log-driver`, lets you specify a [log driver](/reference/cli/docker/container/run/#log-driver). - -### Bug fixes - -- The output of `docker-compose run` was sometimes truncated, especially when running under Jenkins. -- A service's volumes would sometimes not update after volume configuration was changed in `docker-compose.yml`. -- Authenticating against third-party registries would sometimes fail. -- `docker-compose run --rm` would fail to remove the container if the service had a `restart` policy in place. -- `docker-compose scale` would refuse to scale a service beyond 1 container if it exposed a specific port number on the host. -- Compose would refuse to create multiple volume entries with the same host path. - -Thanks @ahromis, @albers, @aleksandr-vin, @antoineco, @ccverak, @chernjie, @dnephin, @edmorley, @fordhurley, @josephpage, @KyleJamesWalker, @lsowen, @mchasal, @noironetworks, @sdake, @sdurrheimer, @sherter, @stephenlawrence, @thaJeztah, @thieman, @turtlemonvh, @twhiteman, @vdemeester, @xuxinkun and @zwily! - -## 1.2.0 - -(2015-04-16) - -- `docker-compose.yml` now supports an `extends` option, which enables a service to inherit configuration from another service in another configuration file. This is really good for sharing common configuration between apps, or for configuring the same app for different environments. Here's the [documentation](https://github.com/docker/compose/blob/master/docs/). - -- When using Compose with a Swarm cluster, containers that depend on one another will be co-scheduled on the same node. This means that most Compose apps will now work out of the box, as long as they don't use `build`. - -- Repeated invocations of `docker-compose up` when using Compose with a Swarm cluster now work reliably. - -- Directories passed to `build`, filenames passed to `env_file` and volume host paths passed to `volumes` are now treated as relative to the *directory of the configuration file*, not the directory that `docker-compose` is being run in. In the majority of cases, those are the same, but if you use the `-f|--file` argument to specify a configuration file in another directory, **this is a breaking change**. - -- A service can now share another service's network namespace with `net: container:`. - -- `volumes_from` and `net: container:` entries are taken into account when resolving dependencies, so `docker-compose up ` will correctly start all dependencies of ``. - -- `docker-compose run` now accepts a `--user` argument to specify a user to run the command as, just like `docker run`. - -- The `up`, `stop` and `restart` commands now accept a `--timeout` (or `-t`) argument to specify how long to wait when attempting to gracefully stop containers, just like `docker stop`. - -- `docker-compose rm` now accepts `-f` as a shorthand for `--force`, just like `docker rm`. 
- -Thanks, @abesto, @albers, @alunduil, @dnephin, @funkyfuture, @gilclark, @IanVS, @KingsleyKelly, @knutwalker, @thaJeztah and @vmalloc! - -## 1.1.0 - -(2015-02-25) - -Fig has been renamed to Docker Compose, or just Compose for short. This has several implications for you: - -- The command you type is now `docker-compose`, not `fig`. -- You should rename your fig.yml to docker-compose.yml. -- If you’re installing via PyPI, the package is now `docker-compose`, so install it with `pip install docker-compose`. - -Besides that, there’s a lot of new stuff in this release: - -- We’ve made a few small changes to ensure that Compose will work with Swarm, Docker’s new clustering tool (https://github.com/docker/swarm). Eventually you'll be able to point Compose at a Swarm cluster instead of a standalone Docker host and it’ll run your containers on the cluster with no extra work from you. As Swarm is still developing, integration is rough and lots of Compose features don't work yet. - -- `docker-compose run` now has a `--service-ports` flag for exposing ports on the given service. This is useful for running your webapp with an interactive debugger, for example. - -- You can now link to containers outside your app with the `external_links` option in docker-compose.yml. - -- You can now prevent `docker-compose up` from automatically building images with the `--no-build` option. This will make fewer API calls and run faster. - -- If you don’t specify a tag when using the `image` key, Compose will default to the `latest` tag, rather than pulling all tags. - -- `docker-compose kill` now supports the `-s` flag, allowing you to specify the exact signal you want to send to a service’s containers. - -- docker-compose.yml now has an `env_file` key, analogous to `docker run --env-file`, letting you specify multiple environment variables in a separate file. This is great if you have a lot of them, or if you want to keep sensitive information out of version control. - -- docker-compose.yml now supports the `dns_search`, `cap_add`, `cap_drop`, `cpu_shares` and `restart` options, analogous to `docker run`’s `--dns-search`, `--cap-add`, `--cap-drop`, `--cpu-shares` and `--restart` options. - -- Compose now ships with Bash tab completion - see the installation and usage docs at https://github.com/docker/compose/blob/1.1.0/docs/completion.md - -- A number of bugs have been fixed - see the milestone for details: https://github.com/docker/compose/issues?q=milestone%3A1.1.0+ - -Thanks @dnephin, @squebe, @jbalonso, @raulcd, @benlangfield, @albers, @ggtools, @bersace, @dtenenba, @petercv, @drewkett, @TFenby, @paulRbr, @Aigeruth and @salehe! - -## 1.0.1 - -(2014-11-04) - - - Added an `--allow-insecure-ssl` option to allow `fig up`, `fig run` and `fig pull` to pull from insecure registries. - - Fixed `fig run` not showing output in Jenkins. - - Fixed a bug where Fig couldn't build Dockerfiles with ADD statements pointing at URLs. - -## 1.0.0 - -(2014-10-16) - -The highlights: - - - [Fig has joined Docker.](https://www.orchardup.com/blog/orchard-is-joining-docker) Fig will continue to be maintained, but we'll also be incorporating the best bits of Fig into Docker itself. - - This means the GitHub repository has moved to [https://github.com/docker/fig](https://github.com/docker/fig) and our IRC channel is now #docker-fig on Freenode. - - - Fig can be used with the [official Docker OS X installer](/manuals/desktop/setup/install/mac-install.md). 
Boot2Docker will mount the home directory from your host machine so volumes work as expected. - - - Fig supports Docker 1.3. - - - It is now possible to connect to the Docker daemon using TLS by using the `DOCKER_CERT_PATH` and `DOCKER_TLS_VERIFY` environment variables. - - - There is a new `fig port` command which outputs the host port binding of a service, in a similar way to `docker port`. - - - There is a new `fig pull` command which pulls the latest images for a service. - - - There is a new `fig restart` command which restarts a service's containers. - - - Fig creates multiple containers in service by appending a number to the service name. For example, `db_1`, `db_2`. As a convenience, Fig will now give the first container an alias of the service name. For example, `db`. - - This link alias is also a valid hostname and added to `/etc/hosts` so you can connect to linked services using their hostname. For example, instead of resolving the environment variables `DB_PORT_5432_TCP_ADDR` and `DB_PORT_5432_TCP_PORT`, you could just use the hostname `db` and port `5432` directly. - - - Volume definitions now support `ro` mode, expanding `~` and expanding environment variables. - - - `.dockerignore` is supported when building. - - - The project name can be set with the `FIG_PROJECT_NAME` environment variable. - - - The `--env` and `--entrypoint` options have been added to `fig run`. - - - The Fig binary for Linux is now linked against an older version of glibc so it works on CentOS 6 and Debian Wheezy. - -Other things: - - - `fig ps` now works on Jenkins and makes fewer API calls to the Docker daemon. - - `--verbose` displays more useful debugging output. - - When starting a service where `volumes_from` points to a service without any containers running, that service will now be started. - - Lots of docs improvements. Notably, environment variables are documented and official repositories are used throughout. - -Thanks @dnephin, @d11wtq, @marksteve, @rubbish, @jbalonso, @timfreund, @alunduil, @mieciu, @shuron, @moss, @suzaku and @chmouel! Whew. - -## 0.5.2 - -(2014-07-28) - - - Added a `--no-cache` option to `fig build`, which bypasses the cache just like `docker build --no-cache`. - - Fixed the `dns:` fig.yml option, which was causing fig to error out. - - Fixed a bug where fig couldn't start under Python 2.6. - - Fixed a log-streaming bug that occasionally caused fig to exit. - -Thanks @dnephin and @marksteve! - -## 0.5.1 - -(2014-07-11) - - - If a service has a command defined, `fig run [service]` with no further arguments will run it. - - The project name now defaults to the directory containing fig.yml, not the current working directory (if they're different) - - `volumes_from` now works properly with containers as well as services - - Fixed a race condition when recreating containers in `fig up` - -Thanks @ryanbrainard and @d11wtq! - -## 0.5.0 - -(2014-07-11) - - - Fig now starts links when you run `fig run` or `fig up`. - - For example, if you have a `web` service which depends on a `db` service, `fig run web ...` will start the `db` service. - - - Environment variables can now be resolved from the environment that Fig is running in. Just specify it as a blank variable in your `fig.yml` and, if set, it'll be resolved: - - ```yaml - environment: - RACK_ENV: development - SESSION_SECRET: - ``` - - - `volumes_from` is now supported in `fig.yml`. 
All of the volumes from the specified services and containers will be mounted: - - ```yaml - volumes_from: - - service_name - - container_name - ``` - - - A host address can now be specified in `ports`: - - ```yaml - ports: - - "0.0.0.0:8000:8000" - - "127.0.0.1:8001:8001" - ``` - - - The `net` and `workdir` options are now supported in `fig.yml`. - - The `hostname` option now works in the same way as the Docker CLI, splitting out into a `domainname` option. - - TTY behavior is far more robust, and resizes are supported correctly. - - Load YAML files safely. - -Thanks to @d11wtq, @ryanbrainard, @rail44, @j0hnsmith, @binarin, @Elemecca, @mozz100 and @marksteve for their help with this release! - -## 0.4.2 - -(2014-06-18) - - - Fix various encoding errors when using `fig run`, `fig up` and `fig build`. - -## 0.4.1 - -(2014-05-08) - - - Add support for Docker 0.11.0. (Thanks @marksteve!) - - Make project name configurable. (Thanks @jefmathiot!) - - Return correct exit code from `fig run`. - -## 0.4.0 - -(2014-04-29) - - - Support Docker 0.9 and 0.10 - - Display progress bars correctly when pulling images (no more ski slopes) - - `fig up` now stops all services when any container exits - - Added support for the `privileged` config option in fig.yml (thanks @kvz!) - - Shortened and aligned log prefixes in `fig up` output - - Only containers started with `fig run` link back to their own service - - Handle UTF-8 correctly when streaming `fig build/run/up` output (thanks @mauvm and @shanejonas!) - - Error message improvements - -## 0.3.2 - -(2014-03-05) - - - Added an `--rm` option to `fig run`. (Thanks @marksteve!) - - Added an `expose` option to `fig.yml`. - -## 0.3.1 - -(2014-03-04) - - - Added contribution instructions. (Thanks @kvz!) - - Fixed `fig rm` throwing an error. - - Fixed a bug in `fig ps` on Docker 0.8.1 when there is a container with no command. - -## 0.3.0 - -(2014-03-03) - - - We now ship binaries for OS X and Linux. No more having to install with Pip! - - Add `-f` flag to specify alternate `fig.yml` files - - Add support for custom link names - - Fix a bug where recreating would sometimes hang - - Update docker-py to support Docker 0.8.0. - - Various documentation improvements - - Various error message improvements - -Thanks @marksteve, @Gazler and @teozkr! - -## 0.2.2 - -(2014-02-17) - - - Resolve dependencies using Cormen/Tarjan topological sort - - Fix `fig up` not printing log output - - Stop containers in reverse order to starting - - Fix scale command not binding ports - -Thanks to @barnybug and @dustinlacewell for their work on this release. - -## 0.2.1 - -(2014-02-04) - - - General improvements to error reporting (#77, #79) - -## 0.2.0 - -(2014-01-31) - - - Link services to themselves so run commands can access the running service. (#67) - - Much better documentation. - - Make service dependency resolution more reliable. (#48) - - Load Fig configurations with a `.yaml` extension. (#58) - -Big thanks to @cameronmaske, @mrchrisadams and @damianmoore for their help with this release. - -## 0.1.4 - -(2014-01-27) - - - Add a link alias without the project name. This makes the environment variables a little shorter: `REDIS_1_PORT_6379_TCP_ADDR`. (#54) - -## 0.1.3 - -(2014-01-23) - - - Fix ports sometimes being configured incorrectly. (#46) - - Fix log output sometimes not displaying. (#47) - -## 0.1.2 - -(2014-01-22) - - - Add `-T` option to `fig run` to disable pseudo-TTY. (#34) - - Fix `fig up` requiring the ubuntu image to be pulled to recreate containers. 
(#33) Thanks @cameronmaske! - - Improve reliability, fix arrow keys and fix a race condition in `fig run`. (#34, #39, #40) - -## 0.1.1 - -(2014-01-17) - - - Fix bug where ports were not exposed correctly (#29). Thanks @dustinlacewell! - -## 0.1.0 - -(2014-01-16) - - - Containers are recreated on each `fig up`, ensuring config is up-to-date with `fig.yml` (#2) - - Add `fig scale` command (#9) - - Use `DOCKER_HOST` environment variable to find Docker daemon, for consistency with the official Docker client (was previously `DOCKER_URL`) (#19) - - Truncate long commands in `fig ps` (#18) - - Fill out CLI help banners for commands (#15, #16) - - Show a friendlier error when `fig.yml` is missing (#4) - - Fix bug with `fig build` logging (#3) - - Fix bug where builds would time out if a step took a long time without generating output (#6) - - Fix bug where streaming container output over the Unix socket raised an error (#7) - -Big thanks to @tomstuart, @EnTeQuAk, @schickling, @aronasorman and @GeoffreyPlitt. - -## 0.0.2 - -(2014-01-02) - - - Improve documentation - - Try to connect to Docker on `tcp://localdocker:4243` and a UNIX socket in addition to `localhost`. - - Improve `fig up` behavior - - Add confirmation prompt to `fig rm` - - Add `fig build` command - -## 0.0.1 - -(2013-12-20) - -Initial release. diff --git a/content/manuals/compose/support-and-feedback/_index.md b/content/manuals/compose/support-and-feedback/_index.md index 1e7e9933b71..cda0bf5b304 100644 --- a/content/manuals/compose/support-and-feedback/_index.md +++ b/content/manuals/compose/support-and-feedback/_index.md @@ -2,5 +2,5 @@ build: render: never title: Support and feedback -weight: 60 +weight: 80 --- \ No newline at end of file diff --git a/content/manuals/compose/support-and-feedback/faq.md b/content/manuals/compose/support-and-feedback/faq.md index 52a113bb04f..e6033ec5a9b 100644 --- a/content/manuals/compose/support-and-feedback/faq.md +++ b/content/manuals/compose/support-and-feedback/faq.md @@ -1,7 +1,7 @@ --- -description: Frequently asked questions for Docker Compose -keywords: documentation, docs, docker, compose, faq, docker compose vs docker-compose -title: Compose FAQs +description: Answers to common questions about Docker Compose, including v1 vs v2, commands, shutdown behavior, and development setup. +keywords: docker compose faq, docker compose questions, docker-compose vs docker compose, docker compose json, docker compose stop delay, run multiple docker compose +title: Frequently asked questions about Docker Compose linkTitle: FAQs weight: 10 tags: [FAQ] @@ -40,7 +40,7 @@ containers. ### Why do my services take 10 seconds to recreate or stop? The `docker compose stop` command attempts to stop a container by sending a `SIGTERM`. It then waits -for a [default timeout of 10 seconds](/reference/cli/docker/compose/stop.md). After the timeout, +for a [default timeout of 10 seconds](/reference/cli/docker/compose/stop/). After the timeout, a `SIGKILL` is sent to the container to forcefully kill it. If you are waiting for this timeout, it means that your containers aren't shutting down when they receive the `SIGTERM` signal. 
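+
+Make sure the main process runs as PID 1 so it actually receives the signal,
+for example by using the exec form of `CMD` or `ENTRYPOINT` rather than the
+shell form. If the application handles `SIGTERM` but legitimately needs more
+time, you can raise the limit per service with `stop_grace_period`. The
+following is a minimal sketch; the service name and duration are illustrative:
+
+```yaml
+services:
+  web:
+    build: .
+    # Allow up to 30 seconds for a clean shutdown before SIGKILL is sent
+    stop_grace_period: 30s
+```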
diff --git a/content/manuals/compose/support-and-feedback/samples-for-compose.md b/content/manuals/compose/support-and-feedback/samples-for-compose.md deleted file mode 100644 index cbeb84380e4..00000000000 --- a/content/manuals/compose/support-and-feedback/samples-for-compose.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -description: Summary of samples related to Compose -keywords: documentation, docs, docker, compose, samples -title: Sample apps with Compose -linkTitle: Sample apps -weight: 30 -aliases: -- /compose/samples-for-compose/ ---- - -The following samples show the various aspects of how to work with Docker -Compose. As a prerequisite, be sure to [install Docker Compose](/manuals/compose/install/_index.md) -if you have not already done so. - -## Key concepts these samples cover - -The samples should help you to: - -- Define services based on Docker images using - [Compose files](/reference/compose-file/_index.md) -- Understand the relationship between `compose.yaml` and - [Dockerfiles](/reference/dockerfile/) -- Learn how to make calls to your application services from Compose files -- Learn how to deploy applications and services to a [swarm](/manuals/engine/swarm/_index.md) - -## Awesome Compose samples - -The Awesome Compose samples provide a starting point on how to integrate different frameworks and technologies using Docker Compose. All samples are available in the [Awesome-compose GitHub repo](https://github.com/docker/awesome-compose) and are ready to run with `docker compose up`. diff --git a/content/manuals/compose/trust-model.md b/content/manuals/compose/trust-model.md new file mode 100644 index 00000000000..017883cb7aa --- /dev/null +++ b/content/manuals/compose/trust-model.md @@ -0,0 +1,130 @@ +--- +title: Trust model for Compose files +weight: 70 +description: Learn how Docker Compose treats Compose files as trusted input and what this means when using files you did not author. +keywords: compose, security, trust model, oci, remote, registry, include, extends, supply chain, trust, best practices +--- + +Docker Compose treats every Compose file as trusted input. When a Compose file +requests elevated privileges, host filesystem access, or any other +configuration, Compose applies it as written. This is the same behavior as +passing flags directly to `docker run`. + +This means that any Compose file you run, whether it lives on your local +filesystem, in a Git repository, or in an OCI registry, has full control over +how containers interact with your host. The security boundary is not where the file comes from but whether you trust the author. + +Evaluating trust means asking: do you know who authored this file, can you verify it hasn't changed since you last reviewed it, and do you understand every privilege it requests? + +## The dependency chain + +A Compose application can be assembled from multiple sources. The +[`include`](/reference/compose-file/include.md) directive imports entire Compose +files, while [`extends`](/reference/compose-file/services.md#extends) inherits +configuration from a specific service in another file. 
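+
+For example, a top-level file can use both mechanisms at once. This sketch is
+illustrative only; the registry addresses and service names are placeholders
+that mirror the diagram below:
+
+```yaml
+include:
+  - oci://registry.example.com/base:v2
+
+services:
+  app:
+    extends:
+      file: oci://registry.example.com/templates:v1
+      service: webapp
+```
+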
Both support remote references and can be chained:
+
+```text
+Your command
+ └─ compose.yaml (local or remote)
+     ├─ services, volumes, networks (direct config)
+     ├─ include:
+     │   └─ oci://registry.example.com/base:v2 (remote dependency)
+     │       └─ services, volumes, networks (indirect config)
+     └─ services:
+         └─ app:
+             └─ extends:
+                 └─ file: oci://registry.example.com/templates:v1
+                     └─ service: webapp (inherited config)
+```
+
+Each level has the same capabilities. The top-level file you inspect may appear
+safe while a nested `include` or `extends` introduces services with elevated
+privileges, host bind mounts, or untrusted images. These dependencies can also
+change independently. Risky settings can be introduced by a nested dependency that you never
+see unless you inspect the fully resolved output.
+
+> [!IMPORTANT]
+>
+> Compose warns you when a configuration references remote sources. Do not
+> accept the prompt without understanding every reference in the chain.
+
+## Best practices
+
+### Inspect the full configuration
+
+To see exactly what Compose applies, including all resolved `includes`,
+`extends`, merged overrides, and interpolated variables, use:
+
+```console
+$ docker compose config
+```
+
+For remote references:
+
+```console
+$ docker compose -f oci://registry.example.com/myapp:latest config
+```
+
+Review this output before running `up` or `create`, especially when the
+configuration comes from a source you have not audited.
+
+#### Fields to look out for
+
+A Compose configuration has broad control over how containers interact with the
+host. The following is a non-exhaustive list of fields that carry security
+implications when set by an untrusted author:
+
+| Field | Effect |
+|-------|--------|
+| `privileged` | Grants the container full access to the host |
+| `cap_add` | Adds Linux capabilities such as `SYS_ADMIN` or `NET_RAW` |
+| `security_opt` | Configures security profiles including seccomp and AppArmor |
+| `volumes` / bind mounts | Mounts host directories into the container |
+| `network_mode: host` | Shares the host network stack |
+| `pid: host` | Shares the host PID namespace |
+| `devices` | Exposes host devices to the container |
+| `image` | Pulls and runs an arbitrary container image |
+
+When in doubt, look up the effect of any unfamiliar field before running the configuration.
+
+### CI/CD environments
+
+Automated pipelines are particularly sensitive because they often run with
+access to credentials, cloud provider tokens, or Docker sockets.
+
+- Avoid referencing public or unverified Compose configurations in automated
+  pipelines.
+- Gate updates behind your normal code review process.
+- Use read-only Docker socket mounts (for example,
+  `/var/run/docker.sock:/var/run/docker.sock:ro`) where possible to limit your risk.
+
+### Pin remote references to digests
+
+Tags are mutable, meaning anyone with push access to a registry can overwrite a tag silently, so a reference you reviewed last week may point to different content today.
+
+Digests are immutable. Instead of referencing by tag, pin to the digest.
+
+```yaml
+include:
+  - oci://registry.example.com/base@sha256:a1b2c3d4...
+```
+
+Treat any update to a pinned digest as a code change. Make sure you review the new content before updating the reference.
+
+### Other
+
+- Use a private registry: Host OCI artifacts on a registry your
+  organization controls. Restrict who can push to it.
+- Audit transitive dependencies: Check every remote `include` and `extends`
+  reference in the chain, not just the top-level file.
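+
+  One rough way to check what the resolved chain requests, wherever it comes
+  from, is to scan the output of `docker compose config` for the fields listed
+  above. This is only a sketch, not a complete audit; the reference is the
+  placeholder used earlier on this page, so adjust it and the pattern to your setup:
+
+  ```console
+  $ docker compose -f oci://registry.example.com/myapp:latest config \
+      | grep -nE 'privileged|cap_add|security_opt|network_mode|pid:|devices'
+  ```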
+- Review all Compose confirmation prompts: When loading remote Compose files, + Compose displays confirmation prompts for interpolation variables, environment + values, and remote includes. Review these before accepting. + +## Further reading + +- [OCI artifact applications](/manuals/compose/how-tos/oci-artifact.md) +- [Use Compose in production](/manuals/compose/how-tos/production.md) +- [`include` reference](/reference/compose-file/include.md) +- [`extends` reference](/reference/compose-file/services.md#extends) +- [Manage secrets in Compose](/manuals/compose/how-tos/use-secrets.md) \ No newline at end of file diff --git a/content/manuals/copilot/_index.md b/content/manuals/copilot/_index.md deleted file mode 100644 index b40f6caedb1..00000000000 --- a/content/manuals/copilot/_index.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Docker for GitHub Copilot -params: - sidebar: - group: Products - badge: - color: violet - text: EA -weight: 50 -description: | - Learn how to streamline Docker-related tasks with the Docker for GitHub - Copilot extension. This integration helps you generate Docker assets, analyze - vulnerabilities, and automate containerization through GitHub Copilot Chat in - various development environments. -keywords: Docker, GitHub Copilot, extension, Visual Studio Code, chat, ai, containerization ---- - -{{< summary-bar feature_name="Docker GitHub Copilot" >}} - -The [Docker for GitHub Copilot](https://github.com/marketplace/docker-for-github-copilot) -extension integrates Docker's capabilities with GitHub Copilot, providing -assistance with containerizing applications, generating Docker assets, and -analyzing project vulnerabilities. This extension helps you streamline -Docker-related tasks wherever GitHub Copilot Chat is available. - -## Key features - -Key features of the Docker for GitHub Copilot extension include: - -- Ask questions and receive responses about containerization in any context - where GitHub Copilot Chat is available, such as on GitHub.com and in Visual Studio Code. -- Automatically generate Dockerfiles, Docker Compose files, and `.dockerignore` - files for a project. -- Open pull requests with generated Docker assets directly from the chat - interface. -- Get summaries of project vulnerabilities from [Docker - Scout](/manuals/scout/_index.md) and receive next steps via the CLI. - -## Data Privacy - -The Docker agent is trained exclusively on Docker's documentation and tools to -assist with containerization and related tasks. It does not have access to your -project's data outside the context of the questions you ask. - -When using the Docker Extension for GitHub Copilot, GitHub Copilot may include -a reference to the currently open file in its request if authorized by the -user. The Docker agent can read the file to provide context-aware responses. - -If the agent is requested to check for vulnerabilities or generate -Docker-related assets, it will clone the referenced repository into in-memory -storage to perform the necessary actions. - -Source code or project metadata is never persistently stored. Questions and -answers are retained for analytics and troubleshooting. Data processed by the -Docker agent is never shared with third parties. 
- -## Supported languages - -The Docker Extension for GitHub Copilot supports the following programming -languages for tasks involving containerizing a project from scratch: - -- Go -- Java -- JavaScript -- Python -- Rust -- TypeScript diff --git a/content/manuals/copilot/examples.md b/content/manuals/copilot/examples.md deleted file mode 100644 index 23bc2c8edf9..00000000000 --- a/content/manuals/copilot/examples.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Example prompts for the Docker agent -linkTitle: Example prompts -description: | - Discover example prompts to interact with the Docker agent and learn how to - automate tasks like Dockerizing projects or opening pull requests. -weight: 30 ---- - -{{< summary-bar feature_name="Docker GitHub Copilot" >}} - -## Use cases - -Here are some examples of the types of questions you can ask the Docker agent: - -### Ask general Docker questions - -You can ask general question about Docker. For example: - -- `@docker what is a Dockerfile?` -- `@docker how do I build a Docker image?` -- `@docker how do I run a Docker container?` -- `@docker what does 'docker buildx imagetools inspect' do?` - -### Get help containerizing your project - -You can ask the agent to help you containerize your existing project: - -- `@docker can you help create a compose file for this project?` -- `@docker can you create a Dockerfile for this project?` - -#### Opening pull requests - -The Docker agent will analyze your project, generate the necessary files, and, -if applicable, offer to raise a pull request with the necessary Docker assets. - -Automatically opening pull requests against your repositories is only available -when the agent generates new Docker assets. - -### Analyze a project for vulnerabilities - -The agent can help you improve your security posture with [Docker -Scout](/manuals/scout/_index.md): - -- `@docker can you help me find vulnerabilities in my project?` -- `@docker does my project contain any insecure dependencies?` - -The agent will run use Docker Scout to analyze your project's dependencies, and -report whether you're vulnerable to any [known CVEs](/manuals/scout/deep-dive/advisory-db-sources.md). - -![Copilot vulnerabilities report](images/copilot-vuln-report.png?w=500px&border=1) - -## Limitations - -- The agent is currently not able to access specific files in your repository, - such as the currently-opened file in your editor, or if you pass a file - reference with your message in the chat message. - -## Feedback - -For issues or feedback, visit the [GitHub feedback repository](https://github.com/docker/copilot-issues). 
diff --git a/content/manuals/copilot/images/copilot-button.png b/content/manuals/copilot/images/copilot-button.png deleted file mode 100644 index 7d40b5cf495..00000000000 Binary files a/content/manuals/copilot/images/copilot-button.png and /dev/null differ diff --git a/content/manuals/copilot/images/copilot-vuln-report.png b/content/manuals/copilot/images/copilot-vuln-report.png deleted file mode 100644 index ca203875bd0..00000000000 Binary files a/content/manuals/copilot/images/copilot-vuln-report.png and /dev/null differ diff --git a/content/manuals/copilot/images/docker-agent-copilot.png b/content/manuals/copilot/images/docker-agent-copilot.png deleted file mode 100644 index 3e2476f5b28..00000000000 Binary files a/content/manuals/copilot/images/docker-agent-copilot.png and /dev/null differ diff --git a/content/manuals/copilot/install.md b/content/manuals/copilot/install.md deleted file mode 100644 index 6b8dd2a8718..00000000000 --- a/content/manuals/copilot/install.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Install the extension for your organization -linkTitle: Install -description: | - Learn how to install the Docker for GitHub Copilot extension for your - organization and manage relevant policies to enable seamless integration. -weight: 10 ---- - -{{< summary-bar feature_name="Docker GitHub Copilot" >}} - -To use the Docker for GitHub copilot extension, you first need to -[install](#install) the extension for your organization, and -[manage](#manage-policies) policies for Copilot in your organization. - -## Prerequisites - -Before you start, ensure that you're signed in to your GitHub account on -GitHub.com. - -## Install - -To install the Docker for GitHub Copilot extension for your GitHub organization: - -1. Go to the [Docker for GitHub Copilot](https://github.com/marketplace/docker-for-github-copilot) - app in the GitHub Marketplace. - -2. Select the **Add** button at the top of the page. - -3. Under **Pricing and setup**, select the organization that you want to - install the extension for and select **Install it for free**. - -4. Select the **Complete order and begin installation** button. - -5. Select the repositories where you want to use the Docker Extension for - GitHub Copilot and finish with **Install**. - -## Manage policies - -If you're enabling the extension for a GitHub organization, you also -need to enable the Copilot Extensions policy. For instructions, see -[Setting a policy for GitHub Copilot Extensions in your organization](https://docs.github.com/en/copilot/managing-copilot/managing-github-copilot-in-your-organization/setting-policies-for-copilot-in-your-organization/managing-policies-for-copilot-in-your-organization#setting-a-policy-for-github-copilot-extensions-in-your-organization). diff --git a/content/manuals/copilot/usage.md b/content/manuals/copilot/usage.md deleted file mode 100644 index 51ba028f20a..00000000000 --- a/content/manuals/copilot/usage.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Using the Docker for GitHub Copilot extension -linkTitle: Usage -description: | - Learn how to use the Docker for GitHub Copilot extension to interact with the - Docker agent, get help Dockerizing projects, and ask Docker-related questions - directly from your IDE or GitHub.com. -weight: 20 ---- - -{{< summary-bar feature_name="Docker GitHub Copilot" >}} - -The Docker Extension for GitHub Copilot provides a chat interface that you can -use to interact with the Docker agent. You can ask questions and get help -Dockerizing your project. 
- -The Docker agent is trained to understand Docker-related questions, and provide -guidance on Dockerfiles, Docker Compose files, and other Docker assets. - -## Setup - -Before you can start interacting with the Docker agent, make sure you've -[installed](./install.md) the extension for your organization. - -### Enable GitHub Copilot chat in your editor or IDE - -For instructions on how to use the Docker Extension for GitHub Copilot in -your editor, see: - -- [Visual Studio Code](https://docs.github.com/en/copilot/github-copilot-chat/copilot-chat-in-ides/using-github-copilot-chat-in-your-ide?tool=vscode) -- [Visual Studio](https://docs.github.com/en/copilot/github-copilot-chat/copilot-chat-in-ides/using-github-copilot-chat-in-your-ide?tool=visualstudio) -- [Codespaces](https://docs.github.com/en/codespaces/reference/using-github-copilot-in-github-codespaces) - -### Verify the setup - -You can verify that the extension has been properly installed by typing -`@docker` in the Copilot Chat window. As you type, you should see the Docker -agent appear in the chat interface. - -![Docker agent in chat](images/docker-agent-copilot.png) - -The first time you interact with the agent, you're prompted to sign in and -authorize the Copilot extension with your Docker account. - -## Asking Docker questions in your editor - -To interact with the Docker agent from within your editor or IDE: - -1. Open your project in your editor. -2. Open the Copilot chat interface. -3. Interact with the Docker agent by tagging `@docker`, followed by your question. - -## Asking Docker questions on GitHub.com - -To interact with the Docker agent from the GitHub web interface: - -1. Go to [github.com](https://github.com/) and sign in to your account. -2. Go to any repository. -3. Select the Copilot logo in the site menu, or select the floating Copilot widget, to open the chat interface. - - ![Copilot chat button](images/copilot-button.png?w=400px) - -4. Interact with the Docker agent by tagging `@docker`, followed by your question. diff --git a/content/manuals/desktop/_index.md b/content/manuals/desktop/_index.md index c5e18105331..7ed3c232805 100644 --- a/content/manuals/desktop/_index.md +++ b/content/manuals/desktop/_index.md @@ -6,7 +6,7 @@ keywords: how to use docker desktop, what is docker desktop used for, what does desktop do, using docker desktop params: sidebar: - group: Products + group: Application development grid: - title: Install Docker Desktop description: | @@ -21,7 +21,7 @@ grid: link: /desktop/use-desktop/ - title: Explore its key features description: | - Find information about [Docker VMM](/desktop/features/vmm/), [WSL](/desktop/features/wsl/), [deploying on Kubernetes](/desktop/features/kubernetes/), and more. + Find information about [Networking](/desktop/features/networking/), [Docker VMM](/desktop/features/vmm/), [WSL](/desktop/features/wsl/), and more. icon: category - title: View the release notes description: Find out about new features, improvements, and bug fixes. @@ -52,22 +52,7 @@ Docker Desktop reduces the time spent on complex setups so you can focus on writ Docker Desktop integrates with your preferred development tools and languages, and gives you access to a vast ecosystem of trusted images and templates via Docker Hub. This empowers teams to accelerate development, automate builds, enable CI/CD workflows, and collaborate securely through shared repositories. -{{< tabs >}} -{{< tab name="What's included in Docker Desktop?" 
>}} - -- [Docker Engine](/manuals/engine/_index.md) -- Docker CLI client -- [Docker Scout](../scout/_index.md) -- [Docker Build](/manuals/build/_index.md) -- [Docker Compose](/manuals/compose/_index.md) -- [Ask Gordon](/manuals/ai/gordon/_index.md) -- [Docker Extensions](../extensions/_index.md) -- [Docker Content Trust](/manuals/engine/security/trust/_index.md) -- [Kubernetes](https://github.com/kubernetes/kubernetes/) -- [Credential Helper](https://github.com/docker/docker-credential-helpers/) - -{{< /tab >}} -{{< tab name="What are the key features of Docker Desktop?">}} +## Key features * Ability to containerize and share any application on any cloud platform, in multiple languages and frameworks. * Quick installation and setup of a complete Docker development environment. @@ -77,7 +62,19 @@ Docker Desktop integrates with your preferred development tools and languages, a * Ability to work natively on Linux through WSL 2 on Windows machines. * Volume mounting for code and data, including file change notifications and easy access to running containers on the localhost network. -{{< /tab >}} -{{< /tabs >}} +## Products inside Docker Desktop + +- [Docker MCP Toolkit and Catalog](/manuals/ai/mcp-catalog-and-toolkit/_index.md) +- [Docker Model Runner](/manuals/ai/model-runner/_index.md) +- [Gordon](/manuals/ai/gordon/_index.md) +- [Docker Offload](/manuals/offload/_index.md) +- [Docker Engine](/manuals/engine/_index.md) +- Docker CLI client +- [Docker Build](/manuals/build/_index.md) +- [Docker Compose](/manuals/compose/_index.md) +- [Docker Scout](../scout/_index.md) +- [Kubernetes](https://github.com/kubernetes/kubernetes/) + +## Next steps {{< grid >}} diff --git a/content/manuals/desktop/cert-revoke-solution.md b/content/manuals/desktop/cert-revoke-solution.md index 2a57d683e4a..13f643c3a9c 100644 --- a/content/manuals/desktop/cert-revoke-solution.md +++ b/content/manuals/desktop/cert-revoke-solution.md @@ -4,6 +4,7 @@ keywords: Docker desktop, fix, mac, troubleshooting, macos, false malware warnin title: Resolve the recent Docker Desktop issue on macOS linkTitle: Fix startup issue for Mac weight: 220 +sitemap: false --- This guide provides steps to address a recent issue affecting some macOS users of Docker Desktop. The issue may prevent Docker Desktop from starting and in some cases, may also trigger inaccurate malware warnings. For more details about the incident, see the [blog post](https://www.docker.com/blog/incident-update-docker-desktop-for-mac/). diff --git a/content/manuals/desktop/enterprise/_index.md b/content/manuals/desktop/enterprise/_index.md index ccd1d127952..bdfa0b2d834 100644 --- a/content/manuals/desktop/enterprise/_index.md +++ b/content/manuals/desktop/enterprise/_index.md @@ -18,6 +18,6 @@ aliases: Docker Desktop Enterprise (DDE) has been deprecated and is no longer in active development. Please use [Docker Desktop](../_index.md) Community instead. -If you are an existing DDE customer, use our [Support form](https://hub.docker.com/support/desktop/) to request a transition to one of our new [subscription plans](https://www.docker.com/pricing). +If you are an existing DDE customer, use the [Support form](https://hub.docker.com/support/desktop/) to request a transition to one of the new [subscriptions](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopEnterprise). If you are looking to deploy Docker Desktop at scale, contact us on [pricingquestions@docker.com](mailto:pricingquestions@docker.com). 
diff --git a/content/manuals/desktop/features/containerd.md b/content/manuals/desktop/features/containerd.md index 6f80994faeb..8b47f607f4c 100644 --- a/content/manuals/desktop/features/containerd.md +++ b/content/manuals/desktop/features/containerd.md @@ -1,37 +1,29 @@ --- title: containerd image store weight: 80 -description: How to activate the containerd integration feature in Docker Desktop +description: Learn about the containerd image store in Docker Desktop and how it extends image management capabilities. keywords: Docker, containerd, engine, image store, lazy-pull toc_max: 3 aliases: -- /desktop/containerd/ + - /desktop/containerd/ --- -Docker Desktop is transitioning to use containerd for image and filesystem management. This page outlines the benefits, setup process, and new capabilities enabled by the containerd image store. - -> [!NOTE] -> -> Docker Desktop maintains separate image stores for the classic and containerd image stores. -> When switching between them, images and containers from the inactive store remain on disk but are hidden until you switch back. +Docker Desktop uses containerd as its image store by default. The image store +is the component responsible for pushing, pulling, and storing images on your +filesystem. The containerd image store supports features like multi-platform +images, image attestations, and alternative snapshotters. ## What is `containerd`? -`containerd` is a container runtime that provides a lightweight, consistent interface for container lifecycle management. It is already used under the hood by Docker Engine for creating, starting, and stopping containers. - -Docker Desktop’s ongoing integration of containerd now extends to the image store, offering more flexibility and modern image support. +`containerd` is a container runtime that provides a lightweight, consistent +interface for container lifecycle and image management. It is used under the +hood by Docker Engine for creating, starting, and stopping containers. ## What is the `containerd` image store? The image store is the component responsible for pushing, pulling, and storing images on the filesystem. -The classic Docker image store is limited in the types of images that it supports. -For example, it doesn't support image indices, containing manifest lists. -When you create multi-platform images, for example, -the image index resolves all the platform-specific variants of the image. -An image index is also required when building images with attestations. - The containerd image store extends the range of image types that the Docker Engine can natively interact with. While this is a low-level architectural change, @@ -47,27 +39,38 @@ it's a prerequisite for unlocking a range of new use cases, including: [2]: https://github.com/containerd/nydus-snapshotter [3]: https://github.com/dragonflyoss/image-service -## Enable the containerd image store +## Classic image store -The containerd image store is enabled by default in Docker Desktop version 4.34 -and later, but only for clean installs or if you perform a factory reset. If -you upgrade from an earlier version of Docker Desktop, or if you use an older -version of Docker Desktop you must manually switch to the containerd image -store. +The classic image store is Docker's legacy storage backend, replaced by the +containerd image store. It doesn't support image indices or manifest lists, so +you can't load multi-platform images locally or build images with attestations. + +Most users have no reason to use the classic image store. 
It's available for +cases where you need to match older behavior or have compatibility +requirements. + +## Switch image stores -To manually enable this feature in Docker Desktop: +The containerd image store is enabled by default in Docker Desktop version 4.34 +and later. To switch between image stores: 1. Navigate to **Settings** in Docker Desktop. -2. In the **General** tab, check **Use containerd for pulling and storing images**. -3. Select **Apply & Restart**. +2. In the **General** tab, check or clear the **Use containerd for pulling and storing images** option. +3. Select **Apply**. -To disable the containerd image store, -clear the **Use containerd for pulling and storing images** checkbox. +> [!NOTE] +> +> Docker Desktop maintains separate image stores for the classic and containerd image stores. +> When switching between them, images and containers from the inactive store remain on disk but are hidden until you switch back. ## Build multi-platform images -The term multi-platform image refers to a bundle of images for multiple different architectures. -Out of the box, the default builder for Docker Desktop doesn't support building multi-platform images. +The containerd image store lets you build multi-platform images +and load them to your local image store: + + + +Building multi-platform images with the classic image store is not supported: ```console $ docker build --platform=linux/amd64,linux/arm64 . @@ -76,10 +79,3 @@ ERROR: Multi-platform build is not supported for the docker driver. Switch to a different driver, or turn on the containerd image store, and try again. Learn more at https://docs.docker.com/go/build-multi-platform/ ``` - -Enabling the containerd image store lets you build multi-platform images -and load them to your local image store: - - - - diff --git a/content/manuals/desktop/features/desktop-cli.md b/content/manuals/desktop/features/desktop-cli.md index 79800975597..4f04bc07ac8 100644 --- a/content/manuals/desktop/features/desktop-cli.md +++ b/content/manuals/desktop/features/desktop-cli.md @@ -6,8 +6,6 @@ description: How to use the Docker Desktop CLI keywords: cli, docker desktop, macos, windows, linux --- -{{< summary-bar feature_name="Docker Desktop CLI" >}} - The Docker Desktop CLI lets you perform key operations such as starting, stopping, restarting, and updating Docker Desktop directly from the command line. The Docker Desktop CLI provides: @@ -31,11 +29,12 @@ docker desktop COMMAND [OPTIONS] | `status` | Displays whether Docker Desktop is running or stopped. | | `engine ls` | Lists available engines (Windows only) | | `engine use` | Switch between Linux and Windows containers (Windows only) | -| `update` | Manage Docker Desktop updates. Available for Mac only with Docker Desktop version 4.38, or all OSs with Docker Desktop version 4.39 and later. | +| `update` | Manage Docker Desktop updates. | | `logs` | Print log entries | | `disable` | Disable a feature | | `enable` | Enable a feature | | `version` | Show the Docker Desktop CLI plugin version information | -| `module` | Manage Docker Desktop modules | +| `kubernetes` | List Kubernetes images used by Docker Desktop or restart the cluster. Available with Docker Desktop version 4.44 and later. | +| `diagnose` | Diagnose Docker Desktop and upload the diagnostics. Available with Docker Desktop 4.60 and later. | -For more details on each command, see the [Docker Desktop CLI reference](/reference/cli/docker/desktop/_index.md). 
+For more details on each command, see the [Docker Desktop CLI reference](/reference/cli/docker/desktop/). diff --git a/content/manuals/desktop/features/dev-box.md b/content/manuals/desktop/features/dev-box.md deleted file mode 100644 index 31f7b7822cd..00000000000 --- a/content/manuals/desktop/features/dev-box.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -Title: Docker Desktop in Microsoft Dev Box -description: Learn about the benefits of and how to setup Docker Desktop in Microsoft Dev Box -keywords: desktop, docker, windows, microsoft dev box ---- - -Docker Desktop is available as a pre-configured image in the Microsoft Azure Marketplace for use with Microsoft Dev Box, allowing developers to quickly set up consistent development environments in the cloud. - -Microsoft Dev Box provides cloud-based, pre-configured developer workstations that allow you to code, build, and test applications without configuring a local development environment. The Docker Desktop image for Microsoft Dev Box comes with Docker Desktop and its dependencies pre-installed, giving you a ready-to-use containerized development environment. - -## Key benefits - -- Pre-configured environment: Docker Desktop, WSL2, and other requirements come pre-installed and configured -- Consistent development: Ensure all team members work with the same Docker environment -- Powerful resources: Access more compute power and storage than might be available on local machines -- State persistence: Dev Box maintains your state between sessions, similar to hibernating a local machine -- Seamless licensing: Use your existing Docker subscription or purchase a new one directly through Azure Marketplace - -## Setup - -### Prerequisites - -- An Azure subscription -- Access to Microsoft Dev Box -- A Docker subscription (Pro, Team, or Business). You can use Docker Desktop in Microsoft Dev Box with any of the following subscription options: - - An existing or new Docker subscription - - A new Docker subscription purchased through Azure Marketplace - - A Docker Business subscription with SSO configured for your organization - -### Set up Docker Desktop in Dev Box - -1. Navigate to the [Docker Desktop for Microsoft Dev Box](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/dockerinc1694120899427.devbox_azuremachine?tab=Overview) listing in Azure Marketplace. -2. Select **Get It Now** to add the virtual machine image to your subscription. -3. Follow the Azure workflow to complete the setup. -4. Use the image to create VMs, assign to Dev Centers, or create Dev Box Pools according to your organization's setup. - -### Activate Docker Desktop - -Once your Dev Box is provisioned with the Docker Desktop image: - -1. Start your Dev Box instance. -2. Launch Docker Desktop. -3. Sign in with your Docker ID. - -## Support - -For issues related to: - -- Docker Desktop configuration, usage, or licensing: Create a support ticket through [Docker Support](https://hub.docker.com/support). -- Dev Box creation, Azure portal configuration, or networking: Contact Azure Support. - -## Limitations - -- Microsoft Dev Box is currently only available on Windows 10 and 11 (Linux VMs are not supported). -- Performance may vary based on your Dev Box configuration and network conditions. 
diff --git a/content/manuals/desktop/features/dev-environments/_index.md b/content/manuals/desktop/features/dev-environments/_index.md deleted file mode 100644 index def2e621485..00000000000 --- a/content/manuals/desktop/features/dev-environments/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -description: Dev Environments -keywords: Dev Environments, share, local, Compose -title: Overview of Dev Environments -linkTitle: Dev Environments -weight: 130 -aliases: -- /desktop/dev-environments/ -params: - sidebar: - badge: - color: blue - text: Beta ---- - -{{% include "dev-envs-changing.md" %}} - -{{< summary-bar feature_name="Dev Environments" >}} - -Dev Environments let you create a configurable developer environment with all the code and tools you need to quickly get up and running. - -It uses tools built into code editors that allows Docker to access code mounted into a container rather than on your local host. This isolates the tools, files and running services on your machine allowing multiple versions of them to exist side by side. - -You can use Dev Environments through the intuitive GUI in Docker Desktop Dashboard or straight from your terminal with the new [`docker dev` CLI plugin](dev-cli.md). - -## Use Dev Environments - -To use Dev Environments: -1. Navigate to the **Features in Development** tab in **Settings**. -2. On the **Beta** tab, select **Turn on Dev Environments**. -3. Select **Apply & restart**. - -The Dev Environments tab is now visible in Docker Desktop Dashboard. - -## How does it work? - ->**Changes to Dev Environments with Docker Desktop 4.13** -> ->Docker has simplified how you configure your dev environment project. All you need to get started is a `compose-dev.yaml` file. If you have an existing project with a `.docker/` folder this is automatically migrated the next time you launch. - -Dev Environments is powered by [Docker Compose](/compose/). This allows Dev Environments to take advantage of all the benefits and features of Compose whilst adding an intuitive GUI where you can launch environments with the click of a button. - -Every dev environment you want to run needs a `compose-dev.yaml` file which configures your application's services and lives in your project directory. You don't need to be an expert in Docker Compose or write a `compose-dev.yaml` file from scratch as Dev Environments creates a starter `compose-dev.yaml` files based on the main language in your project. - -You can also use the many [sample dev environments](https://github.com/docker/awesome-compose) as a starting point for how to integrate different services. Alternatively, see [Set up a dev environment](set-up.md) for more information. - -## What's next? 
- -Learn how to: -- [Launch a dev environment](create-dev-env.md) -- [Set up a dev environment](set-up.md) -- [Distribute your dev environment](share.md) diff --git a/content/manuals/desktop/features/dev-environments/create-dev-env.md b/content/manuals/desktop/features/dev-environments/create-dev-env.md deleted file mode 100644 index 51a833c5d2c..00000000000 --- a/content/manuals/desktop/features/dev-environments/create-dev-env.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -description: Dev Environments -keywords: Dev Environments, share, Docker Desktop, Compose, launch -title: Launch a dev environment -aliases: -- /desktop/dev-environments/create-compose-dev-env/ -- /desktop/dev-environments/create-dev-env/ -weight: 10 ---- - -{{% include "dev-envs-changing.md" %}} - -You can launch a dev environment from a: -- Git repository -- Branch or tag of a Git repository -- Sub-folder of a Git repository -- Local folder - -This does not conflict with any of the local files or local tooling set up on your host. - ->Tip -> ->Install the [Dev Environments browser extension](https://github.com/docker/dev-envs-extension) for [Chrome](https://chrome.google.com/webstore/detail/docker-dev-environments/gnagpachnalcofcblcgdbofnfakdbeka) or [Firefox](https://addons.mozilla.org/en-US/firefox/addon/docker-dev-environments/), to launch a dev environment faster. - -## Prerequisites - -To get started with Dev Environments, you must also install the following tools and extension on your machine: - -- [Git](https://git-scm.com). Make sure add Git to your PATH if you're a Windows user. -- [Visual Studio Code](https://code.visualstudio.com/) -- [Visual Studio Code Remote Containers Extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) - - After Git is installed, restart Docker Desktop. Select **Quit Docker Desktop**, and then start it again. - -## Launch a dev environment from a Git repository - -> [!NOTE] -> -> When cloning a Git repository using SSH, ensure you've added your SSH key to the ssh-agent. To do this, open a terminal and run `ssh-add `. - -> [!IMPORTANT] -> -> If you have enabled the WSL 2 integration in Docker Desktop for Windows, make sure you have an SSH agent running in your WSL 2 distribution. - -{{< accordion title="How to start an SSH agent in WSL 2" >}} - -If your WSL 2 distribution doesn't have an `ssh-agent` running, you can append this script at the end of your profile file (that is: ~/.profile, ~/.zshrc, ...). - -```bash -SSH_ENV="$HOME/.ssh/agent-environment" -function start_agent { - echo "Initializing new SSH agent..." - /usr/bin/ssh-agent | sed 's/^echo/#echo/' > "${SSH_ENV}" - echo succeeded - chmod 600 "${SSH_ENV}" - . "${SSH_ENV}" > /dev/null -} -# Source SSH settings, if applicable -if [ -f "${SSH_ENV}" ]; then - . "${SSH_ENV}" > /dev/null - ps -ef | grep ${SSH_AGENT_PID} | grep ssh-agent$ > /dev/null || { - start_agent; - } -else - start_agent; -fi -``` - -{{< /accordion >}} - -To launch a dev environment: - -1. From the **Dev Environments** tab in Docker Dashboard, select **Create**. The **Create a Dev Environment** dialog displays. -2. Select **Get Started**. -3. Optional: Provide a name for you dev environment. -4. Select **Existing Git repo** as the source and then paste your Git repository link into the field provided. -5. Choose your IDE. You can choose either: - - **Visual Studio Code**. The Git repository is cloned into a Volume and attaches to your containers. This allows you to develop directly inside of them using Visual Studio Code. 
- - **Other**. The Git repository is cloned into your chosen local directory and attaches to your containers as a bind mount. This shares the directory from your computer to the container, and allows you to develop using any local editor or IDE. -6. Select **Continue**. - -To launch the application, run the command `make run` in your terminal. This opens an http server on port 8080. Open [http://localhost:8080](http://localhost:8080) in your browser to see the running application. - - -## Launch from a specific branch or tag - -You can launch a dev environment from a specific branch, for example a branch corresponding to a Pull Request, or a tag by adding `@mybranch` or `@tag` as a suffix to your Git URL: - - `https://github.com/dockersamples/single-dev-env@mybranch` - - or - - `git@github.com:dockersamples/single-dev-env.git@mybranch` - -Docker then clones the repository with your specified branch or tag. - -## Launch from a subfolder of a Git repository - ->Note -> ->Currently, Dev Environments is not able to detect the main language of the subdirectory. You need to define your own base image or services in a `compose-dev.yaml`file located in your subdirectory. For more information on how to configure, see the [React application with a Spring backend and a MySQL database sample](https://github.com/docker/awesome-compose/tree/master/react-java-mysql) or the [Go server with an Nginx proxy and a Postgres database sample](https://github.com/docker/awesome-compose/tree/master/nginx-golang-postgres). - -1. From **Dev Environments** in Docker Dashboard, select **Create**. The **Create a Dev Environment** dialog displays. -2. Select **Get Started**. -3. Optional: Provide a name for you dev environment. -4. Select **Existing Git repo** as the source and then paste the link of your Git repo subfolder into the field provided. -5. Choose your IDE. You can choose either: - - **Visual Studio Code**. The Git repository is cloned into a Volume and attaches to your containers. This allows you to develop directly inside of them using Visual Studio Code. - - **Other**. The Git repository is cloned into your chosen local directory and attaches to your containers as a bind mount. This shares the directory from your computer to the container, and allows you to develop using any local editor or IDE. -6. Select **Continue**. - -To launch the application, run the command `make run` in your terminal. This opens an http server on port 8080. Open [http://localhost:8080](http://localhost:8080) in your browser to see the running application. - -## Launch from a local folder - -1. From **Dev Environments** in Docker Dashboard, select **Create**. The **Create a Dev Environment** dialog displays. -2. Select **Get Started**. -3. Optional: Provide a name for your dev environment. -4. Choose **Local directory** as the source. -5. Select **Select** to open the root directory of the code that you would like to work on. - - A directory from your computer is bind mounted to the container, so any changes you make locally is reflected in the dev environment. You can use an editor or IDE of your choice. - -> [!NOTE] -> -> When using a local folder for a dev environment, file changes are synchronized between your environment container and your local files. This can affect the performance inside the container, depending on the number of files in your local folder and the operations performed in the container. - -## What's next? 
- -Learn how to: -- [Set up a dev environment](set-up.md) -- [Distribute your dev environment](share.md) diff --git a/content/manuals/desktop/features/dev-environments/dev-cli.md b/content/manuals/desktop/features/dev-environments/dev-cli.md deleted file mode 100644 index 2f7d66d5f9a..00000000000 --- a/content/manuals/desktop/features/dev-environments/dev-cli.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -description: Set up a dev Environments -keywords: Dev Environments, share, docker dev, Docker Desktop -title: Use the docker dev CLI plugin -aliases: -- /desktop/dev-environments/dev-cli/ ---- - -{{% include "dev-envs-changing.md" %}} - -Use the new `docker dev` CLI plugin to get the full Dev Environments experience from the terminal in addition to the Dashboard. - -It is available with [Docker Desktop 4.13.0 and later](/manuals/desktop/release-notes.md). - -### Usage - -```bash -docker dev [OPTIONS] COMMAND -``` - -### Commands - -| Command | Description | -|:---------------------|:-----------------------------------------| -| `check` | Check Dev Environments | -| `create` | Create a new dev environment | -| `list` | Lists all dev environments | -| `logs` | Traces logs from a dev environment | -| `open` | Open Dev Environment with the IDE | -| `rm` | Removes a dev environment | -| `start` | Starts a dev environment | -| `stop` | Stops a dev environment | -| `version` | Shows the Docker Dev version information | - -### `docker dev check` - -#### Usage - -`docker dev check [OPTIONS]` - -#### Options - -| Name, shorthand | Description | -|:---------------------|:------------------------------------| -| `--format`,`-f` | Format the output. | - -### `docker dev create` - -#### Usage - -`docker dev create [OPTIONS] REPOSITORY_URL` - -#### Options - -| Name, shorthand | Description | -|:---------------------|:----------------------------------------------------------| -| `--detach`,`-d` | Detach creates a Dev Env without attaching to it's logs. | -| `--open`,`-o` | Open IDE after a successful creation | - -### `docker dev list` - -#### Usage - -`docker dev list [OPTIONS]` - -#### Options - -| Name, shorthand | Description | -|:---------------------|:------------------------------| -| `--format`,`-f` | Format the output | -| `--quiet`,`-q` | Only show dev environments names | - -### `docker dev logs` - -#### Usage - -`docker dev logs [OPTIONS] DEV_ENV_NAME` - -### `docker dev open` - -#### Usage - -`docker dev open DEV_ENV_NAME CONTAINER_REF [OPTIONS]` - -#### Options - -| Name, shorthand | Description | -|:---------------------|:----------------------| -| `--editor`,`-e` | Editor. | - -### `docker dev rm` - -#### Usage - -`docker dev rm DEV_ENV_NAME` - -### `docker dev start` - -#### Usage - -`docker dev start DEV_ENV_NAME` - -### `docker dev stop` - -#### Usage - -`docker dev stop DEV_ENV_NAME` - -### `docker dev version` - -#### Usage - -`docker dev version [OPTIONS]` - -#### Options - -| Name, shorthand | Description | -|:---------------------|:----------------------------------------------| -| `--format`,`-f` | Format the output. | -| `--short`,`-s` | Shows only Docker Dev's version number. 
| diff --git a/content/manuals/desktop/features/dev-environments/set-up.md b/content/manuals/desktop/features/dev-environments/set-up.md deleted file mode 100644 index 8239abb9343..00000000000 --- a/content/manuals/desktop/features/dev-environments/set-up.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -description: Set up a dev Environments -keywords: Dev Environments, share, set up, Compose, Docker Desktop -title: Set up a dev environment -weight: 20 -aliases: -- /desktop/dev-environments/set-up/ ---- - -{{% include "dev-envs-changing.md" %}} - ->**Changes to Dev Environments with Docker Desktop 4.13** -> ->Docker has simplified how you configure your dev environment project. All you need to get started is a `compose-dev.yaml` file. If you have an existing project with a `.docker/` folder this is automatically migrated the next time you launch. -> -> If you are using `.docker/docker-compose.yaml`, we move it to `../compose-dev.yaml`. ->If you are using `.docker/config.json`, we create a `../compose-dev.yaml` file with a single service named "app”. It is configured to use the image or Dockerfile referenced in the JSON as a starting point. - -To set up a dev environment, there are additional configuration steps to tell Docker Desktop how to build, start, and use the right image for your services. - -Dev Environments use a `compose-dev.yaml` file located at the root of your project. This file allows you to define the image required for a dedicated service, the ports you'd like to expose, along with additional configuration options. - -The following is an example `compose-dev.yaml` file. - -```yaml -version: "3.7" -services: - backend: - build: - context: backend - target: development - secrets: - - db-password - depends_on: - - db - db: - image: mariadb - restart: always - healthcheck: - test: [ "CMD", "mysqladmin", "ping", "-h", "127.0.0.1", "--silent" ] - interval: 3s - retries: 5 - start_period: 30s - secrets: - - db-password - volumes: - - db-data:/var/lib/mysql - environment: - - MYSQL_DATABASE=example - - MYSQL_ROOT_PASSWORD_FILE=/run/secrets/db-password - expose: - - 3306 - proxy: - build: proxy - ports: - - 8080:80 - depends_on: - - backend -volumes: - db-data: -secrets: - db-password: - file: db/password.txt -``` - -In the yaml file, the build context `backend` specifies that that the container should be built using the `development` stage (`target` attribute) of the Dockerfile located in the `backend` directory (`context` attribute) - -The `development` stage of the Dockerfile is defined as follows: - -```dockerfile -# syntax=docker/dockerfile:1 -FROM golang:1.16-alpine AS build -WORKDIR /go/src/github.com/org/repo -COPY . . -RUN go build -o server . -FROM build AS development -RUN apk update \ - && apk add git -CMD ["go", "run", "main.go"] -FROM alpine:3.12 -EXPOSE 8000 -COPY --from=build /go/src/github.com/org/repo/server /server -CMD ["/server"] -``` - -The `development` target uses a `golang:1.16-alpine` image with all dependencies you need for development. You can start your project directly from VS Code and interact with the others applications or services such as the database or the frontend. - -In the example, the Docker Compose files are the same. However, they could be different and the services defined in the main Compose file may use other targets to build or directly reference other images. - -## What's next? 
- -Learn how to [distribute your dev environment](share.md) diff --git a/content/manuals/desktop/features/dev-environments/share.md b/content/manuals/desktop/features/dev-environments/share.md deleted file mode 100644 index 41bd8c482d4..00000000000 --- a/content/manuals/desktop/features/dev-environments/share.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -description: Dev Environments -keywords: Dev Environments, share, Docker Desktop -title: Distribute your dev environment -weight: 30 -aliases: -- /desktop/dev-environments/share/ ---- - -{{% include "dev-envs-changing.md" %}} - -The `compose-dev.yaml` config file makes distributing your dev environment easy so everyone can access the same code and any dependencies. - -### Distribute your dev environment - -When you are ready to share your environment, simply copy the link to the Github repo where your project is stored, and share the link with your team members. - -You can also create a link that automatically starts your dev environment when opened. This can then be placed on a GitHub README or pasted into a Slack channel, for example. - -To create the link simply join the following link with the link to your dev environment's GitHub repository: - -`https://open.docker.com/dashboard/dev-envs?url=` - -The following example opens a [Compose sample](https://github.com/docker/awesome-compose/tree/master/nginx-golang-mysql), a Go server with an Nginx proxy and a MariaDB/MySQL database, in Docker Desktop. - -[https://open.docker.com/dashboard/dev-envs?url=https://github.com/docker/awesome-compose/tree/master/nginx-golang-mysql](https://open.docker.com/dashboard/dev-envs?url=https://github.com/docker/awesome-compose/tree/master/nginx-golang-mysql) - -### Open a dev environment that has been distributed to you - -To open a dev environment that has been shared with you, select the **Create** button in the top right-hand corner, select source **Existing Git repo**, and then paste the URL. diff --git a/content/manuals/desktop/features/gpu.md b/content/manuals/desktop/features/gpu.md index 6e69184205f..661d31e3566 100644 --- a/content/manuals/desktop/features/gpu.md +++ b/content/manuals/desktop/features/gpu.md @@ -11,7 +11,7 @@ aliases: > [!NOTE] > -> Currently GPU support in Docker Desktop is only available on Windows with the WSL2 backend. +> GPU support in Docker Desktop is only available on Windows with the WSL2 backend. Docker Desktop for Windows supports NVIDIA GPU Paravirtualization (GPU-PV) on NVIDIA GPUs, allowing containers to access GPU resources for compute-intensive workloads like AI, machine learning, or video processing. 
@@ -63,16 +63,28 @@ GPU Device 0: "GeForce RTX 2060 with Max-Q Design" with compute capability 7.5 = 2724.379 single-precision GFLOP/s at 20 flops per interaction ``` -## Run a real-world model: Llama2 with Ollama +## Run a real-world model: SmolLM2 with Docker Model Runner -Use the [official Ollama image](https://hub.docker.com/r/ollama/ollama) to run the Llama2 LLM with GPU acceleration: +Use Docker Model Runner to run the SmolLM2 LLM with vLLM and GPU acceleration: ```console -$ docker run --gpus=all -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama +$ docker model install-runner --backend vllm --gpu cuda ``` -Then start the model: +Check it's correctly installed: ```console -$ docker exec -it ollama ollama run llama2 +$ docker model status +Docker Model Runner is running + +Status: +llama.cpp: running llama.cpp version: c22473b +vllm: running vllm version: 0.11.0 +``` + +Run the model: + +```console +$ docker model run ai/smollm2-vllm hi +Hello! I'm sure everything goes smoothly here. How can I assist you today? ``` diff --git a/content/manuals/desktop/features/kubernetes.md b/content/manuals/desktop/features/kubernetes.md deleted file mode 100644 index 6319a7955a2..00000000000 --- a/content/manuals/desktop/features/kubernetes.md +++ /dev/null @@ -1,241 +0,0 @@ ---- -description: See how you can deploy to Kubernetes on Docker Desktop -keywords: deploy, kubernetes, kubectl, orchestration, Docker Desktop -title: Deploy on Kubernetes with Docker Desktop -linkTitle: Deploy on Kubernetes -aliases: -- /docker-for-windows/kubernetes/ -- /docker-for-mac/kubernetes/ -- /desktop/kubernetes/ -weight: 60 ---- - -Docker Desktop includes a standalone Kubernetes server and client, as well as Docker CLI integration, enabling local Kubernetes development and testing directly on your machine. - -The Kubernetes server runs as a single or multi-node cluster, within Docker container(s). This lightweight setup helps you explore Kubernetes features, test workloads, and work with container orchestration in parallel with other Docker functionalities. - -Kubernetes on Docker Desktop runs alongside other workloads, including Swarm services and standalone containers. - -![k8s settings](../images/k8s-settings.png) - -## What happens when I enable Kubernetes in Docker Desktop? - -The following actions are triggered in the Docker Desktop backend and VM: - -- Generation of certificates and cluster configuration -- Download and installation of Kubernetes internal components -- Cluster bootup -- Installation of additional controllers for networking and storage - -Turning the Kubernetes server on or off in Docker Desktop does not affect your other workloads. - -## Install and turn on Kubernetes - -1. Open the Docker Desktop Dashboard and navigate to **Settings**. -2. Select the **Kubernetes** tab. -3. Toggle on **Enable Kubernetes**. -4. Choose your [cluster provisioning method](#cluster-provisioning-method). -5. Select **Apply & Restart** to save the settings. - -This sets up the images required to run the Kubernetes server as containers, and installs the `kubectl` command-line tool on your system at `/usr/local/bin/kubectl` (Mac) or `C:\Program Files\Docker\Docker\resources\bin\kubectl.exe` (Windows). - - > [!NOTE] - > - > Docker Desktop for Linux does not include `kubectl` by default. You can install it separately by following the [Kubernetes installation guide](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/). Ensure the `kubectl` binary is installed at `/usr/local/bin/kubectl`. 
- -When Kubernetes is enabled, its status is displayed in the Docker Desktop Dashboard footer and the Docker menu. - -You can check which version of Kubernetes you're on with: - -```console -$ kubectl version -``` - -### Cluster provisioning method - -Docker Desktop Kubernetes can be provisioned with either the `kubeadm` or `kind` -provisioners. - -`kubeadm` is the older provisioner. It supports a single-node cluster, you can't select the kubernetes -version, it's slower to provision than `kind`, and it's not supported by [Enhanced Container Isolation](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/index.md) (ECI), -meaning that if ECI is enabled the cluster works but it's not protected by ECI. - -`kind` is the newer provisioner, and it's available if you are signed in and are -using Docker Desktop version 4.38 or later. It supports multi-node clusters (for -a more realistic Kubernetes setup), you can choose the Kubernetes version, it's -faster to provision than `kubeadm`, and it's supported by ECI (i.e., when ECI is -enabled, the Kubernetes cluster runs in unprivileged Docker containers, thus -making it more secure). Note however that `kind` requires that Docker Desktop be -configured to use the [containerd image store](containerd.md) (the default image -store in Docker Desktop 4.34 and later). - -The following table summarizes this comparison. - -| Feature | `kubeadm` | `kind` | -| :------ | :-----: | :--: | -| Availability | Docker Desktop 4.0+ | Docker Desktop 4.38+ (requires sign in) | -| Multi-node cluster support | No | Yes | -| Kubernetes version selector | No | Yes | -| Speed to provision | ~1 min | ~30 seconds | -| Supported by ECI | No | Yes | -| Works with containerd image store | Yes | Yes | -| Works with Docker image store | Yes | No | - -## Using the kubectl command - -Kubernetes integration automatically installs the Kubernetes CLI command -at `/usr/local/bin/kubectl` on Mac and at `C:\Program Files\Docker\Docker\Resources\bin\kubectl.exe` on Windows. This location may not be in your shell's `PATH` -variable, so you may need to type the full path of the command or add it to -the `PATH`. - -If you have already installed `kubectl` and it is -pointing to some other environment, such as `minikube` or a Google Kubernetes Engine cluster, ensure you change the context so that `kubectl` is pointing to `docker-desktop`: - -```console -$ kubectl config get-contexts -$ kubectl config use-context docker-desktop -``` - -> [!TIP] -> -> If the `kubectl` config get-contexts command returns an empty result, try: -> -> - Running the command in the Command Prompt or PowerShell. -> - Setting the `KUBECONFIG` environment variable to point to your `.kube/config` file. - -### Verify installation - -To confirm that Kubernetes is running, list the available nodes: - -```console -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -docker-desktop Ready control-plane 3h v1.29.1 -``` - -If you installed `kubectl` using Homebrew, or by some other method, and -experience conflicts, remove `/usr/local/bin/kubectl`. - -For more information about `kubectl`, see the -[`kubectl` documentation](https://kubernetes.io/docs/reference/kubectl/overview/). - -## Upgrade your cluster - -Kubernetes clusters are not automatically upgraded with Docker Desktop updates. To upgrade the cluster, you must manually select **Reset Kubernetes Cluster** in settings. - -## Additional settings - -### Viewing system containers - -By default, Kubernetes system containers are hidden. 
To inspect these containers, enable **Show system containers (advanced)**. - -You can now view the running Kubernetes containers with `docker ps` or in the Docker Desktop Dashboard. - -### Configuring a custom image registry for Kubernetes control plane images - -Docker Desktop uses containers to run the Kubernetes control plane. By default, Docker Desktop pulls -the associated container images from Docker Hub. The images pulled depend on the [cluster provisioning mode](#cluster-provisioning-method). - -For example, in `kind` mode it requires the following images: - -```console -docker.io/kindest/node: -docker.io/docker/desktop-cloud-provider-kind: -docker.io/docker/desktop-containerd-registry-mirror: -``` - -In `kubeadm` mode it requires the following images: - -```console -docker.io/registry.k8s.io/kube-controller-manager: -docker.io/registry.k8s.io/kube-apiserver: -docker.io/registry.k8s.io/kube-scheduler: -docker.io/registry.k8s.io/kube-proxy -docker.io/registry.k8s.io/etcd: -docker.io/registry.k8s.io/pause: -docker.io/registry.k8s.io/coredns/coredns: -docker.io/docker/desktop-storage-provisioner: -docker.io/docker/desktop-vpnkit-controller: -docker.io/docker/desktop-kubernetes: -``` - -The image tags are automatically selected by Docker Desktop based on several -factors, including the version of Kubernetes being used. The tags vary for each image. - -To accommodate scenarios where access to Docker Hub is not allowed, admins can -configure Docker Desktop to pull the above listed images from a different registry (e.g., a mirror) -using the [KubernetesImagesRepository](../../security/for-admins/hardened-desktop/settings-management/configure-json-file.md#kubernetes) setting as follows. - -An image name can be broken into `[registry[:port]/][namespace/]repository[:tag]` components. -The `KubernetesImagesRepository` setting allows users to override the `[registry[:port]/][namespace]` -portion of the image's name. - -For example, if Docker Desktop Kubernetes is configured in `kind` mode and -`KubernetesImagesRepository` is set to `my-registry:5000/kind-images`, then -Docker Desktop will pull the images from: - -```console -my-registry:5000/kind-images/node: -my-registry:5000/kind-images/desktop-cloud-provider-kind: -my-registry:5000/kind-images/desktop-containerd-registry-mirror: -``` - -These images should be cloned/mirrored from their respective images in Docker Hub. The tags must -also match what Docker Desktop expects. - -The recommended approach to set this up is the following: - -1) Start Docker Desktop. - -2) In Settings > Kubernetes, enable the *Show system containers* setting. - -3) In Settings > Kubernetes, start Kubernetes using the desired cluster provisioning method: `kubeadm` or `kind`. - -4) Wait for Kubernetes to start. - -5) Use `docker ps` to view the container images used by Docker Desktop for the Kubernetes control plane. - -6) Clone or mirror those images (with matching tags) to your custom registry. - -7) Stop the Kubernetes cluster. - -8) Configure the `KubernetesImagesRepository` setting to point to your custom registry. - -9) Restart Docker Desktop. - -10) Verify that the Kubernetes cluster is using the custom registry images using the `docker ps` command. - -> [!NOTE] -> -> The `KubernetesImagesRepository` setting only applies to control plane images used by Docker Desktop -> to set up the Kubernetes cluster. It has no effect on other Kubernetes pods. 
- -> [!NOTE] -> -> When using `KubernetesImagesRepository` and [Enhanced Container Isolation (ECI)](../../security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md) -> is enabled, add the following images to the [ECI Docker socket mount image list](../../security/for-admins/hardened-desktop/settings-management/configure-json-file.md#enhanced-container-isolation): -> -> * [imagesRepository]/desktop-cloud-provider-kind:* -> * [imagesRepository]/desktop-containerd-registry-mirror:* -> -> These containers mount the Docker socket, so you must add the images to the ECI images list. If not, -> ECI will block the mount and Kubernetes won't start. - -## Troubleshooting - -- If Kubernetes fails to start, make sure Docker Desktop is running with enough allocated resources. Check **Settings** > **Resources**. -- If the `kubectl` commands return errors, confirm the context is set to `docker-desktop` - ```console - $ kubectl config use-context docker-desktop - ``` - You can then try checking the logs of the [Kubernetes system containers](#viewing-system-containers) if you have enabled that setting. -- If you're experiencing cluster issues after updating, reset your Kubernetes cluster. Resetting a Kubernetes cluster can help resolve issues by essentially reverting the cluster to a clean state, and clearing out misconfigurations, corrupted data, or stuck resources that may be causing problems. If the issue still persists, you may need to clean and purge data, and then restart Docker Desktop. - -## Turn off and uninstall Kubernetes - -To turn off Kubernetes in Docker Desktop: - -1. From the Docker Desktop Dashboard, select the **Settings** icon. -2. Select the **Kubernetes** tab. -3. Deselect the **Enable Kubernetes** checkbox. -4. Select **Apply & Restart** to save the settings. This stops and removes Kubernetes containers, and also removes the `/usr/local/bin/kubectl` command. diff --git a/content/manuals/desktop/features/networking.md b/content/manuals/desktop/features/networking.md deleted file mode 100644 index 58d9a73c9b3..00000000000 --- a/content/manuals/desktop/features/networking.md +++ /dev/null @@ -1,187 +0,0 @@ ---- -description: Understand how networking works on Docker Desktop and see the known limitations -keywords: networking, docker desktop, proxy, vpn, Linux, Mac, Windows -title: Explore networking features on Docker Desktop -linkTitle: Networking -aliases: -- /desktop/linux/networking/ -- /docker-for-mac/networking/ -- /mackit/networking/ -- /desktop/mac/networking/ -- /docker-for-win/networking/ -- /docker-for-windows/networking/ -- /desktop/windows/networking/ -- /desktop/networking/ -weight: 30 ---- - -Docker Desktop includes built-in networking capabilities to help you connect containers with services on your host, across containers, or through proxies and VPNs. - -## Networking features for all platforms - -### VPN Passthrough - -Docker Desktop networking can work when attached to a VPN. To do this, -Docker Desktop intercepts traffic from the containers and injects it into -the host as if it originated from the Docker application. - -### Port mapping - -When you run a container with the `-p` argument, for example: - -```console -$ docker run -p 80:80 -d nginx -``` - -Docker Desktop makes whatever is running on port `80` in the container, in -this case, `nginx`, available on port `80` of `localhost`. In this example, the -host and container ports are the same. 
- -To avoid conflicts with services already using port `80` on the host: - -```console -$ docker run -p 8000:80 -d nginx -``` - -Now connections to `localhost:8000` are sent to port `80` in the container. - -> [!TIP] -> -> The syntax for `-p` is `HOST_PORT:CLIENT_PORT`. - -### HTTP/HTTPS Proxy support - -See [Proxies](/manuals/desktop/settings-and-maintenance/settings.md#proxies) - -### SOCKS5 proxy support - -{{< summary-bar feature_name="SOCKS5 proxy support" >}} - -SOCKS (Socket Secure) is a protocol that facilitates the routing of network packets between a client and a server through a proxy server. It provides a way to enhance privacy, security, and network performance for users and applications. - -You can enable SOCKS proxy support to allow outgoing requests, such as pulling images, and access Linux container backend IPs from the host. - -To enable and set up SOCKS proxy support: - -1. Navigate to the **Resources** tab in **Settings**. -2. From the dropdown menu select **Proxies**. -3. Switch on the **Manual proxy configuration** toggle. -4. In the **Secure Web Server HTTPS** box, paste your `socks5://host:port` URL. - -## Networking features for Mac and Linux - -### SSH agent forwarding - -Docker Desktop for Mac and Linux lets you use the host’s SSH agent inside a container. To do this: - -1. Bind mount the SSH agent socket by adding the following parameter to your `docker run` command: - - ```console - $--mount type=bind,src=/run/host-services/ssh-auth.sock,target=/run/host-services/ssh-auth.sock - ``` - -2. Add the `SSH_AUTH_SOCK` environment variable in your container: - - ```console - $ -e SSH_AUTH_SOCK="/run/host-services/ssh-auth.sock" - ``` - -To enable the SSH agent in Docker Compose, add the following flags to your service: - - ```yaml -services: - web: - image: nginx:alpine - volumes: - - type: bind - source: /run/host-services/ssh-auth.sock - target: /run/host-services/ssh-auth.sock - environment: - - SSH_AUTH_SOCK=/run/host-services/ssh-auth.sock - ``` - -## Known limitations - -### Changing internal IP addresses - -The internal IP addresses used by Docker can be changed from **Settings**. After changing IPs, you need to reset the Kubernetes cluster and to leave any active Swarm. - -### There is no `docker0` bridge on the host - -Because of the way networking is implemented in Docker Desktop, you cannot -see a `docker0` interface on the host. This interface is actually within the -virtual machine. - -### I cannot ping my containers - -Docker Desktop can't route traffic to Linux containers. However if you're a Windows user, you can -ping the Windows containers. - -### Per-container IP addressing is not possible - -This is because the Docker `bridge` network is not reachable from the host. -However if you are a Windows user, per-container IP addressing is possible with Windows containers. - -## Use cases and workarounds - -### I want to connect from a container to a service on the host - -The host has a changing IP address, or none if you have no network access. -Docker recommends you connect to the special DNS name `host.docker.internal`, -which resolves to the internal IP address used by the host. - -You can also reach the gateway using `gateway.docker.internal`. - -If you have installed Python on your machine, use the following instructions as an example to connect from a container to a service on the host: - -1. Run the following command to start a simple HTTP server on port 8000. 
- - `python -m http.server 8000` - - If you have installed Python 2.x, run `python -m SimpleHTTPServer 8000`. - -2. Now, run a container, install `curl`, and try to connect to the host using the following commands: - - ```console - $ docker run --rm -it alpine sh - # apk add curl - # curl http://host.docker.internal:8000 - # exit - ``` - -### I want to connect to a container from the host - -Port forwarding works for `localhost`. `--publish`, `-p`, or `-P` all work. -Ports exposed from Linux are forwarded to the host. - -Docker recommends you publish a port, or to connect from another -container. This is what you need to do even on Linux if the container is on an -overlay network, not a bridge network, as these are not routed. - -For example, to run an `nginx` webserver: - -```console -$ docker run -d -p 80:80 --name webserver nginx -``` - -To clarify the syntax, the following two commands both publish container's port `80` to host's port `8000`: - -```console -$ docker run --publish 8000:80 --name webserver nginx - -$ docker run -p 8000:80 --name webserver nginx -``` - -To publish all ports, use the `-P` flag. For example, the following command -starts a container (in detached mode) and the `-P` flag publishes all exposed ports of the -container to random ports on the host. - -```console -$ docker run -d -P --name webserver nginx -``` - -Alternatively, you can also use [host networking](/manuals/engine/network/drivers/host.md#docker-desktop) -to give the container direct access to the network stack of the host. - -See the [run command](/reference/cli/docker/container/run.md) for more details on -publish options used with `docker run`. diff --git a/content/manuals/desktop/features/networking/_index.md b/content/manuals/desktop/features/networking/_index.md new file mode 100644 index 00000000000..515dbc91f7c --- /dev/null +++ b/content/manuals/desktop/features/networking/_index.md @@ -0,0 +1,96 @@ +--- +description: Understand how Docker Desktop handles networking, firewalls, file access, proxies, and endpoint visibility. +keywords: docker desktop, networking, architecture, firewall, proxies, crowdstrike, vpn +title: Networking on Docker Desktop +linkTitle: Networking +weight: 30 +--- + +This page explains how Docker Desktop routes network traffic and file I/O between containers, the VM, and the host, and how this behavior is visible to firewalls and endpoint protection tools. + +## Overview + +Docker Desktop runs the Docker Engine inside a lightweight Linux virtual machine (VM). Depending on your system configuration and operating system, Docker Desktop routes network and file operations between the Docker VM and the host using different backend components. + +### Backend components and responsibilities + +The backend acts as: + +- Network proxy: Translates traffic between the host and Linux VM. + - On Windows and Mac, this is handled by the `com.docker.backend` process. + - On Linux, the `qemu` process performs this function. +- File server: Handles file access from containers to the host filesystem. + - When using gRPC FUSE, the backend performs the file sharing. + - When using `virtiofs`, `osxfs`, or `krun`, file access is handled by those respective daemons rather than the backend process. +- Control plane: Manages Docker API calls, port forwarding, and proxy configuration. 
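A quick way to see the backend's network-proxy role in practice on a Mac host is to publish a container port and then check which process owns the listening socket. This is a minimal sketch that assumes `lsof` is available and port 8080 is free; the equivalent check differs on Windows and Linux.

```console
$ docker run -d -p 8080:80 nginx
$ lsof -nP -iTCP:8080 -sTCP:LISTEN
```

The listener reported for port 8080 should be `com.docker.backend` rather than the container itself, which matches the table that follows.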
+ +The following table summarizes typical setups in more detail: + +| Platform | Setup | Networking handled by | File sharing handled by | Notes | +| --------------- | ------------------------------------ | ------------------------ | -------------------------------------- | --------------------------------------------------------- | +| Windows | Hyper-V | `com.docker.backend.exe` | `com.docker.backend.exe` | Simplest setup with full visibility to EDR/firewall tools | +| Windows (WSL 2) | WSL 2 | `com.docker.backend.exe` | WSL 2 kernel (no visibility from host) | Recommended only when WSL 2 integration is needed | +| Mac | Virtualization framework + gRPC FUSE | `com.docker.backend` | `com.docker.backend` | Recommended for performance and visibility | +| Mac | Virtualization framework + `virtiofs`| `com.docker.backend` | Apple's Virtualization framework | Higher performance but no file access visibility from host| +| Mac | Virtualization framework + `osxfs` | `com.docker.backend` | `osxfs` | Legacy setup, not recommended | +| Mac | DockerVMM + `virtiofs` | `com.docker.backend` | `krun` | Currently in Beta | +| Linux | Native Linux VM | `qemu` | `virtiofsd` | No `com.docker.backend` process on Linux | + + +## How containers connect to the internet + +Each Linux container in Docker Desktop runs inside a small virtual network managed by Docker and every container is attached to a Docker-managed network and receives its own internal IP address. You can view and manage these networks with `docker network ls`, `docker network create`, and `docker network inspect`. They are managed by the [`daemon.json`](/manuals/engine/daemon/_index.md). + +When a container initiates a network request, for example with `apt-get update` or `docker pull`: + +- The container’s `eth0` interface connects to a virtual bridge (`docker0`) inside the VM. +- Outbound traffic from the container is sent through Network Address Translation (NAT) using a virtual adapter (typically with an internal IP such as `192.168.65.3`). You can view or change this with the [Docker Desktop settings](/manuals/desktop/settings-and-maintenance/settings.md#network). +- The traffic is transferred to the host system over a shared-memory channel rather than through a traditional virtual network interface. This approach ensures reliable communication and avoids conflicts with host-level network adapters or firewall configurations. +- On the host, Docker Desktop’s backend process receives the traffic and creates standard TCP/IP connections using the same networking APIs as other applications. + +All outbound container network traffic originates from the `com.docker.backend` process. Firewalls, VPNs, and security tools, like Crowdstrike, see traffic coming from this process — not from a VM or unknown source so firewall and endpoint security software can apply rules directly to `com.docker.backend`. + +## How exposed ports work + +When you publish a container port using the `-p` or `--publish` flag, Docker Desktop makes that container port accessible from your host system or local network. + +For example, with `docker run -p 80:80 nginx`: + +- Docker Desktop's backend process listens on the specified host port, in this case, port `80`. +- When an application such as a web browser connects to that port, Docker Desktop forwards the connection into the Linux VM where the container is running over a shared-memory channel. +- Inside the VM, the connection is routed to the container’s internal IP address and port, for example `172.17.0.2:80`. 
+- The container responds through the same path, so you can access it from your host just like any other local service. + +By default, `docker run -p` listens on all network interfaces (`0.0.0.0`), but you can restrict it to a specific address, such as `127.0.0.1` (`localhost`) or a particular network adapter. This behavior can be modified to bind to `localhost` by default in [Docker Desktop's network settings](/manuals/desktop/settings-and-maintenance/settings.md#network). + +Host firewalls can permit or deny inbound connections by filtering on `com.docker.backend`. + +## Using Docker Desktop with a proxy + +Docker Desktop can use your system’s default proxy settings or custom settings that you configure with [Docker Desktop's proxy setting](/manuals/desktop/settings-and-maintenance/settings.md#proxies). All proxy traffic passes through `com.docker.backend` (`com.docker.backend.exe` on Windows). + +When a proxy is enabled: + +- The backend process forwards the network requests, for example `docker pull`, through an internal proxy at `http.docker.internal:3128`. +- The internal proxy then connects either directly to the internet or through your upstream proxy, depending on your configuration, and adds authentication if necessary. +- Docker Desktop then downloads the requested images or data through the proxy as usual. + +Note that: +- The proxy honors system or manual proxy configuration. +- On Windows, Basic, NTLM, and Kerberos authentication is supported. +- For Mac, NTLM/Kerberos is not supported natively. Run a local proxy on `localhost` as a workaround. +- CLI plugins and other tools that use the Docker API directly must be configured separately with the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables. + +## Firewalls and endpoint visibility + +To restrict VM or container networking, apply rules to `com.docker.backend.exe` (Windows), `com.docker.backend` (Mac), or `qemu` (Linux), as all VM networking is funneled through these processes. + +Use Windows Defender Firewall or enterprise endpoint firewalls for control. This enables traffic inspection and restriction at the host level without modifying the Docker Engine. + +Crowdstrike and similar tools can observe all traffic and file access that passes through the backend process. + +| Action | Visible to host EDR? | Reason | +|---------|----------------------|---------| +| Container reads host files | Yes | Access handled by `com.docker.backend` | +| Container writes host files | Yes | Same process performs the write | +| Container accesses its own filesystem layers | No | Exists only inside the VM | diff --git a/content/manuals/desktop/features/networking/networking-how-tos.md b/content/manuals/desktop/features/networking/networking-how-tos.md new file mode 100644 index 00000000000..ae9f117ca2c --- /dev/null +++ b/content/manuals/desktop/features/networking/networking-how-tos.md @@ -0,0 +1,190 @@ +--- +description: Learn how to connect containers to the host, across containers, or through proxies and VPNs in Docker Desktop.
+keywords: docker desktop, networking, vpn, proxy, port mapping, dns +title: Explore networking how-tos on Docker Desktop +linkTitle: How-tos +aliases: +- /desktop/linux/networking/ +- /docker-for-mac/networking/ +- /mackit/networking/ +- /desktop/mac/networking/ +- /docker-for-win/networking/ +- /docker-for-windows/networking/ +- /desktop/windows/networking/ +- /desktop/networking/ +--- + +This page explains how to configure and use networking features, connect containers to host services, work behind proxies or VPNs, and troubleshoot common issues. + +For details on how Docker Desktop routes network traffic and file I/O between containers, the VM, and the host, see [Network overview](/manuals/desktop/features/networking/_index.md#overview). + +## Core networking how-tos + +### Connect a container to a service on the host + +The host has a changing IP address, or none if you have no network access. To connect to services running on your host, use the special DNS name: + +| Name | Description | +| ------------------------- | ------------------------------------------------ | +| `host.docker.internal` | Resolves to the internal IP address of your host | +| `gateway.docker.internal` | Resolves to the gateway IP of the Docker VM | + + +#### Example + +Run a simple HTTP server on port `8000`: + +```console +$ python -m http.server 8000 +``` + +Then run a container, install `curl`, and try to connect to the host using the following commands: + +```console +$ docker run --rm -it alpine sh +# apk add curl +# curl http://host.docker.internal:8000 +# exit +``` + +### Connect to a container from the host + +To access containerized services from your host or local network, publish ports with the `-p` or `--publish` flag. For example: + +```console +$ docker run -d -p 80:80 --name webserver nginx +``` + +Docker Desktop makes whatever is running on port `80` in the container, in +this case, `nginx`, available on port `80` of `localhost`. + +> [!TIP] +> +> The syntax for `-p` is `HOST_PORT:CLIENT_PORT`. + +To publish all ports, use the `-P` flag. For example, the following command +starts a container (in detached mode) and the `-P` flag publishes all exposed ports of the +container to random ports on the host. + +```console +$ docker run -d -P --name webserver nginx +``` + +Alternatively, you can also use [host networking](/manuals/engine/network/drivers/host.md#docker-desktop) +to give the container direct access to the network stack of the host. + +See the [run command](/reference/cli/docker/container/run/) for more details on +publish options used with `docker run`. + +All inbound connections pass through the Docker Desktop backend process (`com.docker.backend` on Mac, `com.docker.backend.exe` on Windows, or `qemu` on Linux), which handles port forwarding into the VM. +For more details, see [How exposed ports work](/manuals/desktop/features/networking/_index.md#how-exposed-ports-work). + +### Working with VPNs + +Docker Desktop networking can work when attached to a VPN. + +To do this, Docker Desktop intercepts traffic from the containers and injects it into +the host as if it originated from the Docker application. + +For details about how this traffic appears to host firewalls and endpoint detection systems, see [Firewalls and endpoint visibility](/manuals/desktop/features/networking/_index.md#firewalls-and-endpoint-visibility). + +### Working with proxies + +Docker Desktop can use your system proxy or a manual configuration. +To configure proxies: + +1. Navigate to the **Resources** tab in **Settings**.
+2. From the dropdown menu select **Proxies**. +3. Switch on the **Manual proxy configuration** toggle. +4. Enter your HTTP, HTTPS, or SOCKS5 proxy URLs. + +For more details on proxies and proxy configurations, see the [Proxy settings documentation](/manuals/desktop/settings-and-maintenance/settings.md#proxies). + +## Network how-tos for Mac and Windows + +You can control how Docker handles container networking and DNS resolution to better support a range of environments — from IPv4-only to dual-stack and IPv6-only systems. These settings help prevent timeouts and connectivity issues caused by incompatible or misconfigured host networks. + +You can configure these settings on the **Network** tab in the Docker Desktop Dashboard settings, or if you're an admin, with Settings Management via the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md#networking), or the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md). + +> [!NOTE] +> +> These settings can be overridden on a per-network basis using CLI flags or Compose file options. + +### Default networking mode + +Choose the default IP protocol used when Docker creates new networks. This allows you to align Docker with your host’s network capabilities or organizational requirements, such as enforcing IPv6-only access. + +| Mode | Description | +| ---------------------------- | ------------------------------------------- | +| **Dual IPv4/IPv6 (default)** | Supports both IPv4 and IPv6. Most flexible. | +| **IPv4 only** | Uses only IPv4 addressing. | +| **IPv6 only** | Uses only IPv6 addressing. | + +### DNS resolution behavior + +Control how Docker filters DNS records returned to containers, improving reliability in environments where only IPv4 or IPv6 is supported. This setting is especially useful for preventing apps from trying to connect using IP families that aren't actually available, which can cause avoidable delays or failures. + +| Option | Description | +| ------------------------------ | --------------------------------------------------------------------------- | +| **Auto (recommended)** | Automatically filters unsupported record types. (A for IPv4, AAAA for IPv6) | +| **Filter IPv4 (A records)** | Blocks IPv4 lookups. Only available in dual-stack mode. | +| **Filter IPv6 (AAAA records)** | Blocks IPv6 lookups. Only available in dual-stack mode. | +| **No filtering** | Returns both A and AAAA records. | + +> [!IMPORTANT] +> +> Switching the default networking mode resets the DNS filter to Auto. + +## Network how-tos for Mac and Linux + +### SSH agent forwarding + +Docker Desktop for Mac and Linux lets you use the host’s SSH agent inside a container. To do this: + +1. Bind mount the SSH agent socket by adding the following parameter to your `docker run` command: + + ```console + $ --mount type=bind,src=/run/host-services/ssh-auth.sock,target=/run/host-services/ssh-auth.sock + ``` + +2.
To enable the SSH agent in Docker Compose, add the following configuration to your service:

```yaml
services:
  web:
    image: nginx:alpine
    volumes:
      - type: bind
        source: /run/host-services/ssh-auth.sock
        target: /run/host-services/ssh-auth.sock
    environment:
      - SSH_AUTH_SOCK=/run/host-services/ssh-auth.sock
```

## Known limitations

### Changing internal IP addresses

The internal IP addresses used by Docker can be changed from **Settings**. After changing IPs, you need to reset the Kubernetes cluster and leave any active Swarm.

### There is no `docker0` bridge on the host

Because of the way networking is implemented in Docker Desktop, you cannot
see a `docker0` interface on the host. This interface is actually within the
virtual machine.

### I cannot ping my containers

Docker Desktop can't route traffic to Linux containers. However, if you're a Windows user, you can
ping Windows containers.

### Per-container IP addressing is not possible

This is because the Docker `bridge` network is not reachable from the host.
However, if you are a Windows user, per-container IP addressing is possible with Windows containers.
diff --git a/content/manuals/desktop/features/synchronized-file-sharing.md b/content/manuals/desktop/features/synchronized-file-sharing.md
index d0ca03e66eb..bfda2c93d11 100644
--- a/content/manuals/desktop/features/synchronized-file-sharing.md
+++ b/content/manuals/desktop/features/synchronized-file-sharing.md
@@ -10,8 +10,6 @@ aliases:
{{< summary-bar feature_name="Synchronized file sharing" >}}

Synchronized file shares is an alternative file sharing mechanism that provides fast and flexible host-to-VM file sharing, enhancing bind mount performance through the use of synchronized filesystem caches.
-
-![Image of Synchronized file shares pane](../images/synched-file-shares.webp)

## Who is it for?
@@ -49,12 +47,7 @@ When the status indicator displays **Watching for filesystem changes**, your fil

> [!NOTE]
>
-> When you create a new service, setting the [bind mount option consistency](/reference/cli/docker/service/create.md#options-for-bind-mounts) to `:consistent` bypasses Synchronized file shares.
-
-> [!TIP]
->
-> Docker Compose can automatically create file shares for bind mounts.
-> Ensure you're signed in to Docker with a paid subscription and have enabled both **Access experimental features** and **Manage Synchronized file shares with Compose** in Docker Desktop's settings.
+> When you create a new service, setting the [bind mount option consistency](/reference/cli/docker/service/create/#options-for-bind-mounts) to `:consistent` bypasses Synchronized file shares.

## Explore your file share instance
@@ -82,7 +75,7 @@ In general, use your `.syncignore` file to exclude items that aren't critical to

- Changes made to `.syncignore` don't lead to immediate deletions unless the file share is recreated. In other words, files that are newly ignored due to modifications in the `.syncignore` file remain in their current location, but are no longer updated during synchronization.

-- File share instances are currently limited to approximately 2 million files per share. For best performance, if you have a file share instance of this size, try to decompose it into multiple shares corresponding to individual bind mount locations.
+- File share instances are limited to approximately 2 million files per share.
For best performance, if you have a file share instance of this size, try to decompose it into multiple shares corresponding to individual bind mount locations. - Case conflicts, due to Linux being case-sensitive and macOS/Windows only being case-preserving, display as **File exists** problems in the GUI. These can be ignored. However, if they persist, you can report the issue. diff --git a/content/manuals/desktop/features/vmm.md b/content/manuals/desktop/features/vmm.md index 5e977f7c6aa..ce1f70be526 100644 --- a/content/manuals/desktop/features/vmm.md +++ b/content/manuals/desktop/features/vmm.md @@ -1,11 +1,11 @@ --- title: Virtual Machine Manager for Docker Desktop on Mac -linkTitle: Virtual Machine Manager +linkTitle: Virtual Machine Manager keywords: virtualization software, resource allocation, mac, docker desktop, vm monitoring, vm performance, apple silicon -description: Discover Docker Desktop for Mac's Virtual Machine Manager (VMM) options, including the new Docker VMM for Apple Silicon, offering enhanced performance and efficiency +description: Discover Docker Desktop for Mac's Virtual Machine Manager (VMM) options, including Docker VMM for Apple Silicon, offering enhanced performance and efficiency weight: 110 aliases: -- /desktop/vmm/ + - /desktop/vmm/ --- Docker Desktop supports multiple Virtual Machine Managers (VMMs) to power the Linux VM that runs containers. You can choose the most suitable option based on your system architecture (Intel or Apple Silicon), performance needs, and feature requirements. This page provides an overview of the available options. @@ -16,11 +16,12 @@ To change the VMM, go to **Settings** > **General** > **Virtual Machine Manager* {{< summary-bar feature_name="VMM" >}} -Docker VMM is a new, container-optimized hypervisor. By optimizing both the Linux kernel and hypervisor layers, Docker VMM delivers significant performance enhancements across common developer tasks. +Docker VMM is a container-optimized hypervisor. By optimizing both the Linux kernel and hypervisor layers, Docker VMM delivers significant performance enhancements across common developer tasks. Some key performance enhancements provided by Docker VMM include: - - Faster I/O operations: With a cold cache, iterating over a large shared filesystem with `find` is 2x faster than when the Apple Virtualization framework is used. - - Improved caching: With a warm cache, performance can improve by as much as 25x, even surpassing native Mac operations. + +- Faster I/O operations: With a cold cache, iterating over a large shared filesystem with `find` is 2x faster than when the Apple Virtualization framework is used. +- Improved caching: With a warm cache, performance can improve by as much as 25x, even surpassing native Mac operations. These improvements directly impact developers who rely on frequent file access and overall system responsiveness during containerized development. Docker VMM marks a significant leap in speed, enabling smoother workflows and faster iteration cycles. @@ -43,9 +44,9 @@ The Apple Virtualization framework is a stable and well-established option for m > [!NOTE] > -> QEMU will be deprecated on July 14, 2025. For more information, see the [blog announcement](https://www.docker.com/blog/docker-desktop-for-mac-qemu-virtualization-option-to-be-deprecated-in-90-days/) +> QEMU has been deprecated in versions 4.44 and later. 
For more information, see the [blog announcement](https://www.docker.com/blog/docker-desktop-for-mac-qemu-virtualization-option-to-be-deprecated-in-90-days/) -QEMU is a legacy virtualization option for Apple Silicon Macs, primarily supported for older use cases. +QEMU is a legacy virtualization option for Apple Silicon Macs, primarily supported for older use cases. Docker recommends transitioning to newer alternatives, such as Docker VMM or the Apple Virtualization framework, as they offer superior performance and ongoing support. Docker VMM, in particular, offers substantial speed improvements and a more efficient development environment, making it a compelling choice for developers working with Apple Silicon. @@ -55,6 +56,6 @@ Note that this is not related to using QEMU to emulate non-native architectures > [!NOTE] > -> HyperKit will be deprecated in a future release. +> HyperKit is deprecated. Docker recommends switching to the Apple Virtualization framework. -HyperKit is another legacy virtualization option, specifically for Intel-based Macs. Like QEMU, it is still available but considered deprecated. Docker recommends switching to modern alternatives for better performance and to future-proof your setup. \ No newline at end of file +HyperKit is a legacy virtualization option for Intel-based Macs. Docker recommends switching to modern alternatives for better performance and to future-proof your setup. diff --git a/content/manuals/desktop/features/wasm.md b/content/manuals/desktop/features/wasm.md index 35df7ca492c..7ea9942057e 100644 --- a/content/manuals/desktop/features/wasm.md +++ b/content/manuals/desktop/features/wasm.md @@ -15,6 +15,10 @@ params: {{< summary-bar feature_name="Wasm workloads" >}} +> [!IMPORTANT] +> +> Wasm workloads are deprecated and will be removed in a future Docker Desktop release. This feature is no longer actively maintained. + WebAssembly (Wasm) is a fast, light alternative to Linux and Windows containers. With Docker Desktop, you can now run Wasm workloads side by side with traditional containers. @@ -34,7 +38,7 @@ then pre-existing images and containers will be inaccessible. 1. Navigate to **Settings** in Docker Desktop. 2. In the **General** tab, check **Use containerd for pulling and storing images**. 3. Go to **Features in development** and check the **Enable Wasm** option. -4. Select **Apply & restart** to save the settings. +4. Select **Apply** to save the settings. 5. In the confirmation dialog, select **Install** to install the Wasm runtimes. Docker Desktop downloads and installs the following runtimes: diff --git a/content/manuals/desktop/features/wsl/_index.md b/content/manuals/desktop/features/wsl/_index.md index bba84c34e9e..7dd7df427a7 100644 --- a/content/manuals/desktop/features/wsl/_index.md +++ b/content/manuals/desktop/features/wsl/_index.md @@ -13,56 +13,48 @@ aliases: - /desktop/wsl/ --- -Windows Subsystem for Linux (WSL) 2 is a full Linux kernel built by Microsoft, which lets Linux distributions run without managing virtual machines. With Docker Desktop running on WSL 2, users can leverage Linux workspaces and avoid maintaining both Linux and Windows build scripts. In addition, WSL 2 provides improvements to file system sharing and boot time. +Windows Subsystem for Linux (WSL) 2 is a full Linux kernel built by Microsoft that lets Linux distributions run without managing virtual machines. With Docker Desktop running on WSL 2, users can leverage Linux workspaces and avoid maintaining both Linux and Windows build scripts. 
In addition, WSL 2 provides improvements to file system sharing, faster cold-start times, and dynamic resource allocation. -Docker Desktop uses the dynamic memory allocation feature in WSL 2 to improve the resource consumption. This means Docker Desktop only uses the required amount of CPU and memory resources it needs, while allowing CPU and memory-intensive tasks such as building a container, to run much faster. - -Additionally, with WSL 2, the time required to start a Docker daemon after a cold start is significantly faster. +Because WSL 2 uses dynamic memory allocation, Docker Desktop requests only the CPU and memory it actually needs — freeing resources for the rest of your system, while still letting memory-intensive tasks such as multi-stage image builds run at full speed. ## Prerequisites Before you turn on the Docker Desktop WSL 2 feature, ensure you have: -- At a minimum WSL version 1.1.3.0., but ideally the latest version of WSL to [avoid Docker Desktop not working as expected](best-practices.md). +- At a minimum WSL version 2.1.5, but ideally the latest version of WSL to [avoid Docker Desktop not working as expected](best-practices.md). - Met the Docker Desktop for Windows' [system requirements](/manuals/desktop/setup/install/windows-install.md#system-requirements). - Installed the WSL 2 feature on Windows. For detailed instructions, refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows/wsl/install-win10). > [!TIP] > -> For a better experience on WSL, consider enabling the WSL -> [autoMemoryReclaim](https://learn.microsoft.com/en-us/windows/wsl/wsl-config#experimental-settings) -> setting available since WSL 1.3.10 (experimental). -> -> This feature enhances the Windows host's ability to reclaim unused memory within the WSL virtual machine, ensuring improved memory availability for other host applications. This capability is especially beneficial for Docker Desktop, as it prevents the WSL VM from retaining large amounts of memory (in GBs) within the Linux kernel's page cache during Docker container image builds, without releasing it back to the host when no longer needed within the VM. +> Consider enabling the WSL [autoMemoryReclaim](https://learn.microsoft.com/en-us/windows/wsl/wsl-config#experimental-settings) setting, available since WSL 1.3.10 (experimental). +>This setting allows Windows to reclaim unused memory from the WSL virtual machine, preventing the Linux kernel's page cache from holding onto large amounts of RAM after container image builds complete. The result is better memory availability for other applications on the host. ## Turn on Docker Desktop WSL 2 -> [!IMPORTANT] -> -> To avoid any potential conflicts with using WSL 2 on Docker Desktop, you must uninstall any previous versions of Docker Engine and CLI installed directly through Linux distributions before installing Docker Desktop. +Before installing Docker Desktop, uninstall any version of Docker Engine or the Docker CLI +that was installed directly inside a WSL Linux distribution. Running both can cause conflicts. -1. Download and install the latest version of [Docker Desktop for Windows](https://desktop.docker.com/win/main/amd64/Docker%20Desktop%20Installer.exe). +1. Download and install the latest version of [Docker Desktop for Windows](https://desktop.docker.com/win/main/amd64/Docker%20Desktop%20Installer.exe?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-windows). 2. Follow the usual installation instructions to install Docker Desktop. 
Depending on which version of Windows you are using, Docker Desktop may prompt you to turn on WSL 2 during installation. Read the information displayed on the screen and turn on the WSL 2 feature to continue. 3. Start Docker Desktop from the **Windows Start** menu. 4. Navigate to **Settings**. -5. From the **General** tab, select **Use WSL 2 based engine**.. +5. From the **General** tab, select **Use WSL 2 based engine**. If you have installed Docker Desktop on a system that supports WSL 2, this option is turned on by default. -6. Select **Apply & Restart**. +6. Select **Apply**. -Now `docker` commands work from Windows using the new WSL 2 engine. +`docker` commands are now available from any Windows terminal using the WSL 2 engine. > [!TIP] > > By default, Docker Desktop stores the data for the WSL 2 engine at `C:\Users\[USERNAME]\AppData\Local\Docker\wsl`. -> If you want to change the location, for example, to another drive you can do so via the `Settings -> Resources -> Advanced` page from the Docker Dashboard. +> If you want to change the location, go to `Settings -> Resources -> Advanced` page from the Docker Dashboard. > Read more about this and other Windows settings at [Changing settings](/manuals/desktop/settings-and-maintenance/settings.md) -## Enabling Docker support in WSL 2 distributions +## Enable Docker in a WSL 2 distribution -WSL 2 adds support for "Linux distributions" to Windows, where each distribution behaves like a VM except they all run on top of a single shared Linux kernel. - -Docker Desktop does not require any particular Linux distributions to be installed. The `docker` CLI and UI all work fine from Windows without any additional Linux distributions. However for the best developer experience, we recommend installing at least one additional distribution and enable Docker support: +WSL 2 lets multiple Linux distributions run side-by-side on a single shared kernel. Docker Desktop doesn't require a particular distribution to be installed, and `docker` commands work from Windows without one. However, enabling WSL integration for a distribution gives you direct access to `docker` commands from that distribution's terminal — which is useful for Linux-native development workflows. 1. Ensure the distribution runs in WSL 2 mode. WSL can run distributions in both v1 or v2 mode. @@ -88,30 +80,24 @@ Docker Desktop does not require any particular Linux distributions to be install The Docker-WSL integration is enabled on the default WSL distribution, which is [Ubuntu](https://learn.microsoft.com/en-us/windows/wsl/install). To change your default WSL distribution, run: ```console - $ wsl --set-default + $ wsl.exe --set-default ``` If **WSL integrations** isn't available under **Resources**, Docker may be in Windows container mode. In your taskbar, select the Docker menu and then **Switch to Linux containers**. -3. Select **Apply & Restart**. - -> [!NOTE] -> -> With Docker Desktop version 4.30 and earlier, Docker Desktop installed two special-purpose internal Linux distributions `docker-desktop` and `docker-desktop-data`. `docker-desktop` is used to run the Docker engine `dockerd`, while `docker-desktop-data` stores containers and images. Neither can be used for general development. -> -> With fresh installations of Docker Desktop 4.30 and later, `docker-desktop-data` is no longer created. Instead, Docker Desktop creates and -> manages its own virtual hard disk for storage. The `docker-desktop` distribution is still created and used to run the Docker engine. 
-> -> Note that Docker Desktop version 4.30 and later keeps using the `docker-desktop-data` distribution if it was already created by an earlier version of Docker Desktop and has not been freshly installed or factory reset. +3. Select **Apply**. ## WSL 2 security in Docker Desktop -Docker Desktop’s WSL 2 integration operates within the existing security model of WSL and does not introduce additional security risks beyond standard WSL behavior. +Docker Desktop's WSL 2 integration works within WSL's existing security model and does not introduce security risks beyond standard WSL behavior. + +Docker Desktop runs inside its own `docker-desktop` WSL distribution, isolated from other distributions in the same way any two WSL distributions are isolated from each other. Interaction between Docker Desktop and other distributions only occurs when you explicitly enable WSL integration for those distributions. This feature allows easy access to the Docker CLI from integrated distributions. -Docker Desktop runs within its own dedicated WSL distribution, `docker-desktop`, which follows the same isolation properties as any other WSL distribution. The only interaction between Docker Desktop and other installed WSL distributions occurs when the Docker Desktop **WSL integration** feature is enabled in settings. This feature allows easy access to the Docker CLI from integrated distributions. +WSL is designed to aid interoperability between Windows and Linux environments. Its file system is accessible from the Windows host `\\wsl$`, meaning Windows processes can read and modify files within WSL. This behavior is not specific to Docker Desktop, but rather a core aspect of WSL itself. -WSL is designed to facilitate interoperability between Windows and Linux environments. Its file system is accessible from the Windows host `\\wsl$`, meaning Windows processes can read and modify files within WSL. This behavior is not specific to Docker Desktop, but rather a core aspect of WSL itself. +For environments that require stricter isolation: -For organizations concerned about security risks related to WSL and want stricter isolation and security controls, run Docker Desktop in Hyper-V mode instead of WSL 2. Alternatively, run your container workloads with [Enhanced Container Isolation](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md) enabled. +- Run Docker Desktop in Hyper-V mode instead of WSL 2 to avoid the shared-kernel model entirely. +- Enable [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) to add an additional layer of protection around container workloads regardless of backend. ## Additional resources diff --git a/content/manuals/desktop/features/wsl/best-practices.md b/content/manuals/desktop/features/wsl/best-practices.md index 393604b2ced..d4db57121c3 100644 --- a/content/manuals/desktop/features/wsl/best-practices.md +++ b/content/manuals/desktop/features/wsl/best-practices.md @@ -1,26 +1,36 @@ --- -title: Best practices +linkTitle: Best practices +title: WSL 2 best practices for Docker Desktop on Windows description: Best practices for using Docker Desktop with WSL 2 -keywords: wsl, docker desktop, best practices +keywords: wsl 2, docker desktop, best practices, Windows Subsystem for Linux, Docker Desktop Windows performance tags: [Best practices] aliases: - /desktop/wsl/best-practices/ --- -- Always use the latest version of WSL. 
At a minimum you must use WSL version 1.1.3.0., otherwise Docker Desktop may not work as expected. Testing, development, and documentation is based on the newest kernel versions. Older versions of WSL can cause:
-  - Docker Desktop to hang periodically or when upgrading
-  - Deployment via SCCM to fail
-  - The `vmmem.exe` to consume all memory
-  - Network filter policies to be applied globally, not to specific objects
-  - GPU failures with containers
+This page covers recommendations when running Docker Desktop on Windows using WSL 2, including version requirements and file system performance.

-- To get the best out of the file system performance when bind-mounting files, it's recommended that you store source code and other data that is bind-mounted into Linux containers. For instance, use `docker run -v :` in the Linux file system, rather than the Windows file system. You can also refer to the [recommendation](https://learn.microsoft.com/en-us/windows/wsl/compare-versions) from Microsoft.
-  - Linux containers only receive file change events, “inotify events”, if the original files are stored in the Linux filesystem. For example, some web development workflows rely on inotify events for automatic reloading when files have changed.
-  - Performance is much higher when files are bind-mounted from the Linux filesystem, rather than remoted from the Windows host. Therefore avoid `docker run -v /mnt/c/users:/users,` where `/mnt/c` is mounted from Windows.
-  - Instead, from a Linux shell use a command like `docker run -v ~/my-project:/sources ` where `~` is expanded by the Linux shell to `$HOME`.
+## Keep WSL up to date

-- If you have concerns about the size of the `docker-desktop-data` distribution, take a look at the [WSL tooling built into Windows](https://learn.microsoft.com/en-us/windows/wsl/disk-space).
-  - Installations of Docker Desktop version 4.30 and later no longer rely on the `docker-desktop-data` distribution; instead Docker Desktop creates and manages its own virtual hard disk (VHDX) for storage. (note, however, that Docker Desktop keeps using the `docker-desktop-data` distribution if it was already created by an earlier version of the software).
-  - Starting from version 4.34 and later, Docker Desktop automatically manages the size of the managed VHDX and returns unused space to the operating system.
+Always use the latest version of WSL.

-- If you have concerns about CPU or memory usage, you can configure limits on the memory, CPU, and swap size allocated to the [WSL 2 utility VM](https://learn.microsoft.com/en-us/windows/wsl/wsl-config#global-configuration-options-with-wslconfig).
+At a minimum, you must use WSL version 2.1.5, otherwise Docker Desktop may not work as expected. Additionally, if you intend to use Enhanced Container Isolation, ensure you're using WSL version 2.6 or later. This is required because ECI depends on a Linux kernel version of at least 6.3.0, and WSL 2.6+ bundles Linux kernel version 6.6. Testing, development, and documentation are based on the newest kernel versions. Older versions of WSL can cause:
+- Docker Desktop to hang periodically or when upgrading
+- Deployment via SCCM to fail
+- The `vmmem.exe` to consume all memory
+- Network filter policies to be applied globally, not to specific objects
+- GPU failures with containers
+
+## Optimize file system performance with bind mounts
+
+To get the best out of the file system performance when bind-mounting files, store source code and other data that is bind-mounted into Linux containers in the Linux file system, rather than the Windows file system.
For instance, use `docker run -v <host-path>:<container-path>` with the source path in the Linux file system. You can also refer to [Microsoft's recommendation](https://learn.microsoft.com/en-us/windows/wsl/compare-versions).
+
+Linux containers only receive file change events, “inotify events”, if the original files are stored in the Linux filesystem. For example, some web development workflows rely on inotify events for automatic reloading when files have changed.
+
+Performance is much higher when files are bind-mounted from the Linux filesystem, rather than accessed from the Windows host filesystem. Therefore, avoid `docker run -v /mnt/c/users:/users` where `/mnt/c` is mounted from Windows.
+
+Instead, from a Linux shell use a command like `docker run -v ~/my-project:/sources <my-image>` where `~` is expanded by the Linux shell to `$HOME`.
+
+## Limit CPU and memory usage
+
+If you have concerns about CPU or memory usage, configure limits on the memory, CPU, and swap size allocated to the [WSL 2 utility VM](https://learn.microsoft.com/en-us/windows/wsl/wsl-config#global-configuration-options-with-wslconfig).
diff --git a/content/manuals/desktop/features/wsl/custom-kernels.md b/content/manuals/desktop/features/wsl/custom-kernels.md
index 0fd8fcf0186..020c9e0f1fe 100644
--- a/content/manuals/desktop/features/wsl/custom-kernels.md
+++ b/content/manuals/desktop/features/wsl/custom-kernels.md
@@ -1,31 +1,34 @@
---
+linkTitle: Custom kernels
title: Custom kernels on WSL
description: Using custom kernels with Docker Desktop on WSL 2
keywords: wsl, docker desktop, custom kernel
tags: [Best practices, troubleshooting]
---

-Docker Desktop depends on several kernel features built into the default
-WSL 2 Linux kernel distributed by Microsoft. Consequently, using a
-custom kernel with Docker Desktop on WSL 2 is not officially supported
+> [!WARNING]
+>
+> Using a custom kernel with Docker Desktop on WSL 2 is not officially supported
and may cause issues with Docker Desktop startup or operation.

+Docker Desktop depends on several kernel features built into the default
+WSL 2 Linux kernel distributed by Microsoft.
+
However, in some cases it may be necessary to run custom kernels; Docker Desktop does not block their use, and some users have reported success using them.

-If you choose to use a custom kernel, it is recommended you start
+## Recommendations if you must use a custom kernel
+
+If you choose to use a custom kernel, start
from the kernel tree distributed by Microsoft from their [official repository](https://github.com/microsoft/WSL2-Linux-Kernel) and then add the features you need on top of that.

-It's also recommended that you:
+Also:

- Use the same kernel version as the one distributed by the latest WSL2 release. You can find the version by running `wsl.exe --system uname -r` in a terminal.
-- Start from the default kernel configuration as provided by Microsoft
-from their [repository](https://github.com/microsoft/WSL2-Linux-Kernel)
-and add the features you need on top of that.
- Make sure that your kernel build environment includes `pahole` and its version is properly reflected in the corresponding kernel config (`CONFIG_PAHOLE_VERSION`).
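For example, a build that follows these recommendations might look like the sketch below. This is only an illustration: the branch name and the in-tree `Microsoft/config-wsl` path are assumptions about the current layout of Microsoft's repository, so check them against the kernel version reported by `wsl.exe --system uname -r` before building:

```console
$ git clone --depth 1 -b linux-msft-wsl-6.6.y https://github.com/microsoft/WSL2-Linux-Kernel.git
$ cd WSL2-Linux-Kernel
$ cp Microsoft/config-wsl .config
$ pahole --version
$ make -j"$(nproc)"
```

After enabling any additional options in `.config` and building, you would point WSL at the resulting kernel image through the `kernel` setting in your `.wslconfig` file and restart WSL.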
diff --git a/content/manuals/desktop/features/wsl/use-wsl.md b/content/manuals/desktop/features/wsl/use-wsl.md index fc4a7fd4031..003b11ade95 100644 --- a/content/manuals/desktop/features/wsl/use-wsl.md +++ b/content/manuals/desktop/features/wsl/use-wsl.md @@ -1,16 +1,21 @@ --- -title: Use WSL +linkTitle: Use WSL +title: Develop with Docker Desktop using WSL 2 on Windows description: How to develop with Docker and WSL 2 and understand GPU support for WSL keywords: wsl, wsl 2, develop, docker desktop, windows aliases: - /desktop/wsl/use-wsl/ --- -The following section describes how to start developing your applications using Docker and WSL 2. We recommend that you have your code in your default Linux distribution for the best development experience using Docker and WSL 2. After you have turned on the WSL 2 feature on Docker Desktop, you can start working with your code inside the Linux distribution and ideally with your IDE still in Windows. This workflow is straightforward if you are using [VS Code](https://code.visualstudio.com/download). +The following section describes how to start developing your applications using Docker and WSL 2. + +For the best development experience, store your code inside your default Linux distribution. After you have turned on the WSL 2 feature on Docker Desktop, you can start working with your code inside the Linux distribution and ideally with your IDE still in Windows. This workflow is straightforward if you are using [VS Code](https://code.visualstudio.com/download). ## Develop with Docker and WSL 2 -1. Open VS Code and install the [Remote - WSL](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-wsl) extension. This extension lets you work with a remote server in the Linux distribution and your IDE client still on Windows. +Before you begin, make sure you have enabled WSL 2 integration in Docker Desktop under **Settings** > **Resources** > **WSL Integration**. + +1. Open VS Code and install the [WSL](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-wsl) extension. This extension lets you work with a remote server in the Linux distribution and your IDE client still on Windows. 2. 
Open your terminal and type: ```console diff --git a/content/manuals/desktop/images/dashboard.png b/content/manuals/desktop/images/dashboard.png deleted file mode 100644 index 904ff4dcd88..00000000000 Binary files a/content/manuals/desktop/images/dashboard.png and /dev/null differ diff --git a/content/manuals/desktop/images/dashboard.webp b/content/manuals/desktop/images/dashboard.webp new file mode 100644 index 00000000000..01602cf6de3 Binary files /dev/null and b/content/manuals/desktop/images/dashboard.webp differ diff --git a/content/manuals/desktop/images/gordon-run-ctr.png b/content/manuals/desktop/images/gordon-run-ctr.png deleted file mode 100644 index 5369a82a7b0..00000000000 Binary files a/content/manuals/desktop/images/gordon-run-ctr.png and /dev/null differ diff --git a/content/manuals/desktop/images/k8s-settings.png b/content/manuals/desktop/images/k8s-settings.png deleted file mode 100644 index aa8882b7647..00000000000 Binary files a/content/manuals/desktop/images/k8s-settings.png and /dev/null differ diff --git a/content/manuals/desktop/images/resource-saver-settings.png b/content/manuals/desktop/images/resource-saver-settings.png deleted file mode 100644 index d492399c816..00000000000 Binary files a/content/manuals/desktop/images/resource-saver-settings.png and /dev/null differ diff --git a/content/manuals/desktop/images/resource-saver-settings.webp b/content/manuals/desktop/images/resource-saver-settings.webp new file mode 100644 index 00000000000..c43a64df94e Binary files /dev/null and b/content/manuals/desktop/images/resource-saver-settings.webp differ diff --git a/content/manuals/desktop/images/resource-saver-status-bar.png b/content/manuals/desktop/images/resource-saver-status-bar.png deleted file mode 100644 index c1724e58e78..00000000000 Binary files a/content/manuals/desktop/images/resource-saver-status-bar.png and /dev/null differ diff --git a/content/manuals/desktop/images/shared-folder-on-demand.png b/content/manuals/desktop/images/shared-folder-on-demand.png deleted file mode 100644 index 957a5b9046e..00000000000 Binary files a/content/manuals/desktop/images/shared-folder-on-demand.png and /dev/null differ diff --git a/content/manuals/desktop/images/synched-file-shares.webp b/content/manuals/desktop/images/synched-file-shares.webp deleted file mode 100644 index 828df943d07..00000000000 Binary files a/content/manuals/desktop/images/synched-file-shares.webp and /dev/null differ diff --git a/content/manuals/desktop/previous-versions/2.x-mac.md b/content/manuals/desktop/previous-versions/2.x-mac.md index d1e9b00fb78..a582c9a6aeb 100644 --- a/content/manuals/desktop/previous-versions/2.x-mac.md +++ b/content/manuals/desktop/previous-versions/2.x-mac.md @@ -28,7 +28,7 @@ Docker Desktop 2.5.0.0 contains a Kubernetes upgrade. Your local Kubernetes clus ### New -- Users with a paid Docker subscription plan can now see the vulnerability scan report on the Remote repositories tab in Docker Desktop. +- Users with a paid Docker subscription can now see the vulnerability scan report on the Remote repositories tab in Docker Desktop. - Docker Desktop introduces a support option for users with a paid Docker subscription. 
### Security diff --git a/content/manuals/desktop/previous-versions/3.x-mac.md b/content/manuals/desktop/previous-versions/3.x-mac.md index 3444a12d02f..edf1038b506 100644 --- a/content/manuals/desktop/previous-versions/3.x-mac.md +++ b/content/manuals/desktop/previous-versions/3.x-mac.md @@ -49,7 +49,7 @@ This page contains release notes for Docker Desktop for Mac 3.x. ### New -**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. For more information and for instructions on how to use Dev Environments, see [Development Environments Preview](/manuals/desktop/features/dev-environments/_index.md). +**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. ### Upgrades @@ -60,8 +60,8 @@ This page contains release notes for Docker Desktop for Mac 3.x. - Fixed network's IPAM configuration. Service can define a fixed IP. Fixes for [docker/compose-cli#1678](https://github.com/docker/compose-cli/issues/1678) and [docker/compose-cli#1816](https://github.com/docker/compose-cli/issues/1816) - Dev Environments - - Support VS Code Insiders. See [dev-environments#3](https://github.com/docker/dev-environments/issues/3) - - Allow users to specify a branch when cloning a project. See [dev-environments#11](https://github.com/docker/dev-environments/issues/11) + - Support VS Code Insiders. + - Allow users to specify a branch when cloning a project. ### Bug fixes and minor changes @@ -77,9 +77,9 @@ This page contains release notes for Docker Desktop for Mac 3.x. ### New -**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. For more information and for instructions on how to use Dev Environments, see [Development Environments Preview](/manuals/desktop/features/dev-environments/_index.md). +**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. -**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. 
There are a few remaining flags that have yet to be implemented, see the [docker-compose compatibility list](/manuals/compose/releases/migrate.md) for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. +**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the docker-compose compatibility list for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. ### Bug fixes and minor changes @@ -90,9 +90,9 @@ This page contains release notes for Docker Desktop for Mac 3.x. ### New -**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. For more information and for instructions on how to use Dev Environments, see [Development Environments Preview](/manuals/desktop/features/dev-environments/_index.md). +**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. -**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the [docker-compose compatibility list](/manuals/compose/releases/migrate.md) for more information about the flags that are supported in the new compose command. 
If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. +**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the docker-compose compatibility list for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. ### Upgrades @@ -124,7 +124,7 @@ This page contains release notes for Docker Desktop for Mac 3.x. **Volume Management**: Docker Desktop users can now create and delete volumes using the Docker Dashboard and also see which volumes are being used. For more information, see [Explore volumes](../use-desktop/volumes.md). -**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the [docker-compose compatibility list](/manuals/compose/releases/migrate.md) for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. +**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the docker-compose compatibility list for more information about the flags that are supported in the new compose command. 
If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. **Skip Docker Desktop updates**: All users can now skip an update when they are prompted to install individual Docker Desktop releases. diff --git a/content/manuals/desktop/previous-versions/3.x-windows.md b/content/manuals/desktop/previous-versions/3.x-windows.md index b93c1c7435f..910a8afe3d0 100644 --- a/content/manuals/desktop/previous-versions/3.x-windows.md +++ b/content/manuals/desktop/previous-versions/3.x-windows.md @@ -56,7 +56,7 @@ This page contains release notes for Docker Desktop for Windows 3.x. ### New -**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. For more information and for instructions on how to use Dev Environments, see [Development Environments Preview](/manuals/desktop/features/dev-environments/_index.md). +**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. ### Upgrades @@ -67,7 +67,7 @@ This page contains release notes for Docker Desktop for Windows 3.x. - Fixed network's IPAM configuration. Service can define a fixed IP. Fixes for [docker/compose-cli#1678](https://github.com/docker/compose-cli/issues/1678) and [docker/compose-cli#1816](https://github.com/docker/compose-cli/issues/1816) - Dev Environments - - Support VS Code Insiders. See [dev-environments#3](https://github.com/docker/dev-environments/issues/3) + - Support VS Code Insiders. - Allow users to specify a branch when cloning a project. See [dev-environments#11](https://github.com/docker/dev-environments/issues/11) ### Bug fixes and minor changes @@ -84,9 +84,9 @@ This page contains release notes for Docker Desktop for Windows 3.x. ### New -**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. For more information and for instructions on how to use Dev Environments, see [Development Environments Preview](/manuals/desktop/features/dev-environments/_index.md). +**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. 
-**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the [docker-compose compatibility list](/manuals/compose/releases/migrate.md) for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. +**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the docker-compose compatibility list for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. ### Bug fixes and minor changes @@ -98,9 +98,9 @@ This page contains release notes for Docker Desktop for Windows 3.x. ### New -**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. For more information and for instructions on how to use Dev Environments, see [Development Environments Preview](/manuals/desktop/features/dev-environments/_index.md). +**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. -**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. 
The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the [docker-compose compatibility list](/manuals/compose/releases/migrate.md) for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. +**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the docker-compose compatibility list for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. ### Upgrades @@ -131,7 +131,7 @@ This page contains release notes for Docker Desktop for Windows 3.x. **Volume Management**: Docker Desktop users can now create and delete volumes using the Docker Dashboard and also see which volumes are being used. For more information, see [Explore volumes](../use-desktop/volumes.md). -**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the [docker-compose compatibility list](/manuals/compose/releases/migrate.md) for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. +**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. 
While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the docker-compose compatibility list for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. **Skip Docker Desktop updates**: All users can now skip an update when they are prompted to install individual Docker Desktop releases. diff --git a/content/manuals/desktop/previous-versions/archive-mac.md b/content/manuals/desktop/previous-versions/archive-mac.md index 692afebd842..5de481dc4e0 100644 --- a/content/manuals/desktop/previous-versions/archive-mac.md +++ b/content/manuals/desktop/previous-versions/archive-mac.md @@ -31,7 +31,7 @@ This page contains release notes for older versions of Docker Desktop for Mac. - Linux Kernel 4.9.93 with CEPH, DRBD, RBD, MPLS_ROUTING and MPLS_IPTUNNEL enabled * New - - Kubernetes Support. You can now run a single-node Kubernetes cluster from the "Kubernetes" Pane in Docker For Mac Preferences and use kubectl commands as well as docker commands. See [the Kubernetes section](/manuals/desktop/features/kubernetes.md) + - Kubernetes Support. You can now run a single-node Kubernetes cluster from the "Kubernetes" Pane in Docker For Mac Preferences and use kubectl commands as well as docker commands. See [the Kubernetes section](/manuals/desktop/use-desktop/kubernetes.md) - Add an experimental SOCKS server to allow access to container networks, see [docker/for-mac#2670](https://github.com/docker/for-mac/issues/2670#issuecomment-372365274). Also see [docker/for-mac#2721](https://github.com/docker/for-mac/issues/2721) - Re-enable raw as the default disk format for users running macOS 10.13.4 and higher. Note this change only takes effect after a "reset to factory defaults" or "remove all data" (from the Whale menu -> Preferences -> Reset). Related to [docker/for-mac#2625](https://github.com/docker/for-mac/issues/2625) diff --git a/content/manuals/desktop/previous-versions/archive-windows.md b/content/manuals/desktop/previous-versions/archive-windows.md index 262e7c79339..3f6b0433cd3 100644 --- a/content/manuals/desktop/previous-versions/archive-windows.md +++ b/content/manuals/desktop/previous-versions/archive-windows.md @@ -47,7 +47,7 @@ This page contains release notes for older versions of Docker Desktop for Window - Linux Kernel 4.9.93 with CEPH, DRBD, RBD, MPLS_ROUTING and MPLS_IPTUNNEL enabled * New - - Kubernetes Support. You can now run a single-node Kubernetes cluster from the "Kubernetes" Pane in Docker for Windows settings and use kubectl commands as well as Docker commands. See [the Kubernetes section](/manuals/desktop/features/kubernetes.md) + - Kubernetes Support. You can now run a single-node Kubernetes cluster from the "Kubernetes" Pane in Docker for Windows settings and use kubectl commands as well as Docker commands. 
See [the Kubernetes section](/manuals/desktop/use-desktop/kubernetes.md) * Bug fixes and minor changes - AUFS storage driver is deprecated in Docker Desktop and AUFS support will be removed in the next major release. You can continue with AUFS in Docker Desktop 18.06.x, but you will need to reset the disk image (in Settings > Reset menu) before updating to the next major update. You can check documentation to [save images](/reference/cli/docker/image/save/#examples) and [backup volumes](/manuals/engine/storage/volumes.md#back-up-restore-or-migrate-data-volumes) diff --git a/content/manuals/desktop/previous-versions/edge-releases-mac.md b/content/manuals/desktop/previous-versions/edge-releases-mac.md index 60a9bccf7f5..a6032d4ce09 100644 --- a/content/manuals/desktop/previous-versions/edge-releases-mac.md +++ b/content/manuals/desktop/previous-versions/edge-releases-mac.md @@ -5,16 +5,17 @@ title: Docker Desktop for Mac Edge release notes toc_min: 1 toc_max: 2 aliases: -- /desktop/mac/release-notes/edge-releases/ + - /desktop/mac/release-notes/edge-releases/ sitemap: false --- -This page contains information about Docker Desktop Edge releases. Edge releases give you early access to our newest features. Note that some of the features may be experimental, and some of them may not ever reach the Stable release. +This page contains information about Docker Desktop Edge releases. Edge releases give you early access to our newest features. Note that some of the features may be experimental, and some of them may not ever reach the Stable release. For Docker Desktop system requirements, see [What to know before you install](/manuals/desktop/setup/install/mac-install.md#system-requirements). ## Docker Desktop Community 2.5.4 + 2020-12-07 ### Upgrades @@ -27,10 +28,11 @@ For Docker Desktop system requirements, see - Changed the «Update and quit» menu entry to «Update and restart». - Fixed the check for updates dialog reporting the build number instead of the version number of a new version. - Downgraded the kernel to [4.19.121](https://hub.docker.com/layers/docker/for-desktop-kernel/4.19.121-2a1dbedf3f998dac347c499808d7c7e029fbc4d3-amd64/images/sha256-4e7d94522be4f25f1fbb626d5a0142cbb6e785f37e437f6fd4285e64a199883a?context=repo) to reduce the CPU usage of hyperkit. Fixes [docker/for-mac#5044](https://github.com/docker/for-mac/issues/5044) -- Fixed a bug that DNS would return `NXDOMAIN` when a name exists but the type of record was not found. Fixes [docker/for-mac#5020](https://github.com/docker/for-mac/issues/5020). Related to https://gitlab.alpinelinux.org/alpine/aports/-/issues/11879 +- Fixed a bug that DNS would return `NXDOMAIN` when a name exists but the type of record was not found. Fixes [docker/for-mac#5020](https://github.com/docker/for-mac/issues/5020). Related to https://gitlab.alpinelinux.org/alpine/aports/-/issues/11879 - Avoid caching bad file sizes and modes when using `osxfs`. Fixes [docker/for-mac#5045](https://github.com/docker/for-mac/issues/5045). ## Docker Desktop Community 2.5.3 + 2020-11-30 ### Upgrades @@ -43,6 +45,7 @@ For Docker Desktop system requirements, see - Removed unnecessary log messages which slow down filesystem event injection. ## Docker Desktop Community 2.5.2 + 2020-11-26 ### New @@ -57,6 +60,7 @@ For Docker Desktop system requirements, see - Display an error message instead of crashing when the application needs write access on specific directories. 
See [docker/for-mac#5068](https://github.com/docker/for-mac/issues/5068) ## Docker Desktop Community 2.5.1.0 + 2020-11-18 This release contains a Kubernetes upgrade. Note that your local Kubernetes cluster will be reset after installing Docker Desktop. @@ -78,6 +82,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - Fixed an unexpected EOF error when trying to start a non-existing container. See [docker/for-mac#5025](https://github.com/docker/for-mac/issues/5025). ## Docker Desktop Community 2.4.2.0 + 2020-10-19 ### New @@ -106,6 +111,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - Fixed automatic start on log in. See [docker/for-mac#4877](https://github.com/docker/for-mac/issues/4877) and [docker/for-mac#4890](https://github.com/docker/for-mac/issues/4890). ## Docker Desktop Community 2.4.1.0 + 2020-10-01 ### Upgrades @@ -122,6 +128,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - Fixed a possible premature file handle close when using `gRPC-FUSE`. ## Docker Desktop Community 2.3.7.0 + 2020-09-17 ### New @@ -146,10 +153,11 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus ### Known issues - The `clock_gettime64` system call returns `EPERM` rather than `ENOSYS` -in i386 images. To work around this issue, disable `seccomp` by using -the `--privileged` flag. See [docker/for-win#8326](https://github.com/docker/for-win/issues/8326). + in i386 images. To work around this issue, disable `seccomp` by using + the `--privileged` flag. See [docker/for-win#8326](https://github.com/docker/for-win/issues/8326). ## Docker Desktop Community 2.3.6.1 + 2020-09-08 ### Upgrades @@ -158,9 +166,10 @@ the `--privileged` flag. See [docker/for-win#8326](https://github.com/docker/for ### Bug fixes and minor changes -- Docker Desktop now correctly displays the state of "Use gRPC FUSE for file sharing" in the UI. Fixes [docker/for-mac#4864](https://github.com/docker/for-mac/issues/4864). +- Docker Desktop now correctly displays the state of "Use gRPC FUSE for file sharing" in the UI. Fixes [docker/for-mac#4864](https://github.com/docker/for-mac/issues/4864). ## Docker Desktop Community 2.3.6.0 + 2020-09-01 ### New @@ -183,9 +192,10 @@ the `--privileged` flag. See [docker/for-win#8326](https://github.com/docker/for ### Bug fixes and minor changes -- Fixed a Mac CPU usage bug by removing the serial console from `hyperkit`, see [docker/roadmap#12]( https://github.com/docker/roadmap/issues/12#issuecomment-663163280). To open a shell in the VM use either `nc -U ~/Library/Containers/com.docker.docker/Data/debug-shell.sock` (on Mac) or `putty -serial \\.\pipe\dockerDebugShell` (on Windows). +- Fixed a Mac CPU usage bug by removing the serial console from `hyperkit`, see [docker/roadmap#12](https://github.com/docker/roadmap/issues/12#issuecomment-663163280). To open a shell in the VM use either `nc -U ~/Library/Containers/com.docker.docker/Data/debug-shell.sock` (on Mac) or `putty -serial \\.\pipe\dockerDebugShell` (on Windows). ## Docker Desktop Community 2.3.5.0 + 2020-08-21 ### New @@ -214,6 +224,7 @@ the `--privileged` flag. See [docker/for-win#8326](https://github.com/docker/for - Fixed minor bugs in the **Images** view. ## Docker Desktop Community 2.3.4.0 + 2020-07-28 ### New @@ -233,6 +244,7 @@ the `--privileged` flag. See [docker/for-win#8326](https://github.com/docker/for - Mutagen two-way sync now uses `.dockersyncignore` rather than `.dockerignore` to exclude files. 
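A minimal sketch of the VM debug-shell access described in the 2.3.6.0 entry above, assuming the default Docker Desktop data directory on macOS and the stock macOS `nc`, which supports Unix sockets:

```bash
# Attach to the Docker Desktop VM debug shell over its Unix socket (macOS).
# The socket path below is the Docker Desktop default; adjust it if your data folder differs.
nc -U ~/Library/Containers/com.docker.docker/Data/debug-shell.sock
```

On Windows, the same entry points to `putty -serial \\.\pipe\dockerDebugShell` instead.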
## Docker Desktop Community 2.3.3.2 + 2020-07-21 ### Upgrades @@ -246,6 +258,7 @@ the `--privileged` flag. See [docker/for-win#8326](https://github.com/docker/for - Docker CLI commands can now bypass any active Mutagen synchronization for volumes using `:cached`. See [docker/for-mac#1592](https://github.com/docker/for-mac/issues/1592#issuecomment-651309816). ## Docker Desktop Community 2.3.3.0 + 2020-07-09 ### Upgrades @@ -262,6 +275,7 @@ the `--privileged` flag. See [docker/for-win#8326](https://github.com/docker/for - Docker Desktop now implements the shared volume flag `:delegated` by automatically setting up a two-way file sync with Mutagen. ## Docker Desktop Community 2.3.2.0 + 2020-06-25 ### Upgrades @@ -283,6 +297,7 @@ the `--privileged` flag. See [docker/for-win#8326](https://github.com/docker/for - Removed the legacy Kubernetes context `docker-for-desktop`. The context `docker-desktop` should be used instead. See [docker/for-mac#4089](https://github.com/docker/for-mac/issues/4089). ## Docker Desktop Community 2.3.1.0 + 2020-05-20 ### New @@ -302,16 +317,16 @@ We appreciate you trying out an early version of the Mutagen file sync feature. - Fixed containers logs in Docker Desktop **Dashboard** which were sometimes truncated. Fixes [docker/for-win#5954](https://github.com/docker/for-win/issues/5954). ## Docker Desktop Community 2.3.0.1 -2020-04-28 +2020-04-28 ### Bug fixes and minor changes - Fixed a bug that caused starting and stopping of a Compose application from the UI to fail when the path contains whitespace. ## Docker Desktop Community 2.3.0.0 -2020-04-20 +2020-04-20 ### Upgrades @@ -332,8 +347,8 @@ We appreciate you trying out an early version of the Mutagen file sync feature. - Fixed bug where diagnostic upload would fail if the username contained spaces. ## Docker Desktop Community 2.2.3.0 -2020-04-02 +2020-04-02 ### Upgrades @@ -363,8 +378,8 @@ We appreciate you trying out an early version of the Mutagen file sync feature. - Loopback and unspecified IPv6 addresses (`::` and `::1`) within a container do not currently work. Some web servers and other programs may be using these addresses in their configuration files. ## Docker Desktop Community 2.2.2.0 -2020-03-02 +2020-03-02 This release contains a Kubernetes upgrade. Note that your local Kubernetes cluster will be reset after installing Docker Desktop. @@ -379,8 +394,8 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - Ceph support has been removed from Docker Desktop to save disk space. ## Docker Desktop Community 2.2.1.0 -2020-02-12 +2020-02-12 ### Upgrades @@ -388,10 +403,10 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - [Go 1.12.16](https://golang.org/doc/devel/release.html#go1.12) ## Docker Desktop Community 2.1.7.0 -2019-12-11 +2019-12-11 -> [!NOTE] +> [!NOTE] > > Docker Desktop Edge 2.1.7.0 is the release candidate for the upcoming major Stable release. Please help us test this version before the wider release and report any issues in the [docker/for-mac](https://github.com/docker/for-mac/issues) GitHub repository. @@ -411,8 +426,8 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - Fixed an issue where attempts to log into Docker through Docker Desktop could sometimes fail with the `Incorrect authentication credentials` error. Fixes [docker/for-mac#4010](https://github.com/docker/for-mac/issues/4010). 
## Docker Desktop Community 2.1.6.0 -2019-11-18 +2019-11-18 ### Upgrades @@ -429,8 +444,8 @@ Added the ability to start and stop Compose-based applications and view combined - Fixed a container start error when a container has more than one port with an arbitrary or not-yet-configured external port number. For example, `docker run -p 80 -p 443 nginx`. Fixes [docker/for-win#4935](https://github.com/docker/for-win/issues/4935) and [docker/compose#6998](https://github.com/docker/compose/issues/6998). ## Docker Desktop Community 2.1.5.0 -2019-11-04 +2019-11-04 This release contains a Kubernetes upgrade. Note that your local Kubernetes cluster will be reset after installation. @@ -457,8 +472,8 @@ Fixed an issue that caused VMs running on older hardware with macOS Catalina to - When you deploy a Docker App with multiple containers on Kubernetes, Docker Desktop displays each Pod as an application on the Dashboard. ## Docker Desktop Community 2.1.4.0 -2019-10-15 +2019-10-15 ### Upgrades @@ -475,8 +490,8 @@ Fixed an issue that caused VMs running on older hardware with macOS Catalina to - Docker Machine is no longer included in the Docker Desktop installer. You can download it separately from the [Docker Machine releases](https://github.com/docker/machine/releases) page. ## Docker Desktop Community 2.1.3.0 -2019-09-16 +2019-09-16 ### Bug fixes and minor changes @@ -486,8 +501,8 @@ Fixed an issue that caused VMs running on older hardware with macOS Catalina to - Deactivated the **Reset Kubernetes** button when Kubernetes is not activated. ## Docker Desktop Community 2.1.2.0 -2019-09-09 +2019-09-09 #### Upgrades @@ -504,8 +519,8 @@ Fixed an issue that caused VMs running on older hardware with macOS Catalina to - Added support for `Expect: 100-continue` headers in the Docker API proxy. Some HTTP clients such as `curl` send this header when the payload is large, for example, when creating containers. Fixes [moby/moby#39693](https://github.com/moby/moby/issues/39693). ## Docker Desktop Community 2.1.1.0 -2019-08-12 +2019-08-12 #### Upgrades @@ -519,25 +534,25 @@ Fixed an issue that caused VMs running on older hardware with macOS Catalina to - Fixed an issue where running some Docker commands can fail if you are not using Credential Helpers. [docker/for-mac#3785](https://github.com/docker/for-mac/issues/3785) - Fixed a bug that did not allow users to copy and paste text in the **Preferences** > **Daemon** window. [docker/for-mac#3798](https://github.com/docker/for-mac/issues/3798) -## Docker Desktop Community 2.1.0.0 -2019-07-26 +## Docker Desktop Community 2.1.0.0 +2019-07-26 This release contains Kubernetes security improvements. Note that your local Kubernetes PKI and cluster will be reset after installation. 
#### Upgrades - - [Docker 19.03.1](https://github.com/docker/docker-ce/releases/tag/v19.03.1) - - [Docker Compose 1.24.1](https://github.com/docker/compose/releases/tag/1.24.1) - - [Alpine 3.10](https://alpinelinux.org/posts/Alpine-3.10.0-released.html) - - Linux Kernel 4.9.184 - - [Docker Credential Helpers 0.6.3](https://github.com/docker/docker-credential-helpers/releases/tag/v0.6.3) +- [Docker 19.03.1](https://github.com/docker/docker-ce/releases/tag/v19.03.1) +- [Docker Compose 1.24.1](https://github.com/docker/compose/releases/tag/1.24.1) +- [Alpine 3.10](https://alpinelinux.org/posts/Alpine-3.10.0-released.html) +- Linux Kernel 4.9.184 +- [Docker Credential Helpers 0.6.3](https://github.com/docker/docker-credential-helpers/releases/tag/v0.6.3) #### New - - Introduced a new user interface for the Docker Desktop **Preferences** menu. - - The **Restart**, **Reset**, and **Uninstall** options are now available on the **Troubleshoot** menu. - +- Introduced a new user interface for the Docker Desktop **Preferences** menu. +- The **Restart**, **Reset**, and **Uninstall** options are now available on the **Troubleshoot** menu. + #### Bug fixes and minor changes - Changed the host's Kubernetes context to ensure `docker run -v .kube:kube ... kubectl` works. @@ -549,26 +564,25 @@ This release contains Kubernetes security improvements. Note that your local Kub ## Docker Community Edition 2.0.5.0 2019-06-12 - This is the Edge channel, which gives you early access to our newest features. Be aware that some of them may be experimental, and some of them may not ever reach the Stable release. This release contains a Kubernetes upgrade. Note that your local Kubernetes cluster will be reset after install. -* Upgrades +- Upgrades - [Docker 19.03.0-rc2](https://github.com/docker/docker-ce/releases/tag/v19.03.0-rc2) - [Kubernetes 1.14.3](https://github.com/kubernetes/kubernetes/releases/tag/v1.14.3) - [Compose on Kubernetes 0.4.23](https://github.com/docker/compose-on-kubernetes/releases/tag/v0.4.23) - [linuxkit v0.7](https://github.com/linuxkit/linuxkit/releases/tag/v0.7) - [Qemu 4.0.0](https://github.com/docker/binfmt) for cross compiling for ARM -* New +- New - Docker Desktop includes the `buildx` plugin (currently experimental). - - Selecting the `Experimental features` checkbox on the Docker Desktop Preferences Daemon page enables experimental features in the Docker daemon and the Docker CLI. + - Selecting the `Experimental features` checkbox on the Docker Desktop Preferences Daemon page enables experimental features in the Docker daemon and the Docker CLI. - Docker Desktop has improved the reliability of `com.docker.osxfs trace` performance profiling command. - Users can now run the `com.docker.osxfs trace --summary` option to get a high-level summary of operations, instead of receiving a trace of all operations. - Docker Desktop now supports large lists of DNS resource records on Mac. Fixes [docker/for-mac#2160](https://github.com/docker/for-mac/issues/2160#issuecomment-431571031) -* Bug fixes and minor changes +- Bug fixes and minor changes - Docker Desktop does not send DNS queries for `docker-desktop.` every 10s. It now relies on the host's DNS domain search order rather than trying to replicate it inside the VM. - Docker Desktop has removed the ability to log in using email address as a username as the Docker command line does not support this. - Docker Desktop now allows running a Docker registry inside a container. 
Fixes [docker/for-mac#3611](https://github.com/docker/for-mac/issues/3611) @@ -576,31 +590,27 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus ## Docker Community Edition 2.0.4.1 2019-05-07 - -* Bug fixes and minor changes +- Bug fixes and minor changes - Upgrade QEMU from 2.8.0 to 3.1.0 to fix an emulation issue when building and running Java applications on Arm64 devices. ## Docker Community Edition 2.0.4.0 2019-04-30 - -* Upgrades +- Upgrades - [Docker 19.03.0-beta3](https://github.com/docker/docker-ce/releases/tag/v19.03.0-beta3) - [Docker Compose 1.24.0](https://github.com/docker/compose/releases/tag/1.24.0) - [Compose on Kubernetes 0.4.22](https://github.com/docker/compose-on-kubernetes/releases/tag/v0.4.22) - [Kubernetes 1.14.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md#changelog-since-v1141) -* New +- New - App: Docker CLI plugin to configure, share, and install applications - - Extend Compose files with metadata and parameters - Reuse the same application across multiple environments (Development/QA/Staging/Production) - Multi-orchestrator installation (Swarm or Kubernetes) - Push/Pull/Promotion/Signing supported for application, with the same workflow as images - Fully CNAB compliant - Full support for Docker Contexts - + - Buildx (Tech Preview): Docker CLI plugin for extended build capabilities with BuildKit - - Familiar UI from docker build - Full BuildKit capabilities with container driver - Multiple builder instance support @@ -608,53 +618,49 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - Concurrent building of Compose files - High-level build constructs with `bake` -* Bug fixes and minor changes +- Bug fixes and minor changes - Truncate UDP DNS responses which are over 512 bytes in size ## Docker Community Edition 2.0.3.0 2019-03-05 - -* Upgrades +- Upgrades - [Docker 18.09.3](https://github.com/docker/docker-ce/releases/tag/v18.09.3) -* Bug fixes and minor changes +- Bug fixes and minor changes - Fixed port 8080 that was used on localhost when starting Kubernetes. Fixes [docker/for-mac#3522](https://github.com/docker/for-mac/issues/3522) - Error message improvements, do not propose to run diagnostics / reset to factory default when not appropriate. ### Docker Community Edition 2.0.2.1 2019-02-15 - -* Upgrades +- Upgrades - [Docker 18.09.2](https://github.com/docker/docker-ce/releases/tag/v18.09.2), fixes [CVE-2019-5736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5736) ## Docker Community Edition 2.0.2.0 2019-02-06 - -* Upgrades +- Upgrades - [Docker Compose 1.24.0-rc1](https://github.com/docker/compose/releases/tag/1.24.0-rc1) - [Docker Machine 0.16.1](https://github.com/docker/machine/releases/tag/v0.16.1) - [Compose on Kubernetes 0.4.18](https://github.com/docker/compose-on-kubernetes/releases/tag/v0.4.18) -* New +- New - Rebranded UI - -* Bug fixes and minor changes + +- Bug fixes and minor changes - Kubernetes: use default maximum number of pods for kubelet. [docker/for-mac#3453](https://github.com/docker/for-mac/issues/3453) - Fix DockerHelper crash. [docker/for-mac#3470](https://github.com/docker/for-mac/issues/3470) - Fix binding of privileged ports with specified IP. 
[docker/for-mac#3464](https://github.com/docker/for-mac/issues/3464) ## Docker Community Edition 2.0.1.0 2019-01-11 - -* Upgrades +- Upgrades - [Docker 18.09.1](https://github.com/docker/docker-ce/releases/tag/v18.09.1) - [Kubernetes 1.13.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md#v1130) - [Kitematic 0.17.6](https://github.com/docker/kitematic/releases/tag/v0.17.6) - Golang 1.10.6, fixes CVEs: [CVE-2018-16875](https://www.cvedetails.com/cve/CVE-2018-16875), [CVE-2018-16873](https://www.cvedetails.com/cve/CVE-2018-16873) and [CVE-2018-16874](https://www.cvedetails.com/cve/CVE-2018-16874) - + WARNING: If you have an existing Kubernetes cluster created with Docker Desktop, this upgrade will reset the cluster. If you need to back up your Kubernetes cluster or persistent volumes you can use [Ark](https://github.com/heptio/ark). -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix service log collection in diagnostics - Gather /etc/hosts to help diagnostics - Ensure localhost resolves to 127.0.0.1. Related to [docker/for-mac#2990](https://github.com/docker/for-mac/issues/2990#issuecomment-443097942), [docker/for-mac#3383](https://github.com/docker/for-mac/issues/3383) @@ -666,20 +672,18 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus ### Docker Community Edition 2.0.0.0-mac82 2018-12-07 - -* Upgrades +- Upgrades - [Docker compose 1.23.2](https://github.com/docker/compose/releases/tag/1.23.2) - [Docker Machine 0.16.0](https://github.com/docker/machine/releases/tag/v0.16.0) ### Docker Community Edition 2.0.0.0-mac77 2018-11-14 - -* Upgrades +- Upgrades - [Docker 18.09.0](https://github.com/docker/docker-ce-packaging/releases/tag/v18.09.0) - [Docker compose 1.23.1](https://github.com/docker/compose/releases/tag/1.23.1) - [Kitematic 0.17.5](https://github.com/docker/kitematic/releases/tag/v0.17.5) -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix appearance in dark mode for OS X 10.14 (Mojave) - VPNKit: Improved scalability of port forwarding. Related to [docker/for-mac#2841](https://github.com/docker/for-mac/issues/2841) - VPNKit: Limit the size of the UDP NAT table. This ensures port forwarding and regular TCP traffic continue even when running very chatty UDP protocols. @@ -687,52 +691,48 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus ### Docker Community Edition 2.0.0.0-beta1-mac75 2018-09-14 - -* Upgrades +- Upgrades - [Docker 18.09.0-ce-beta1](https://github.com/docker/docker-ce/releases/tag/v18.09.0-ce-beta1) - Linux Kernel 4.9.125 -* New +- New - New version scheme -* Deprecation +- Deprecation - Removed support of AUFS - Removed support of OS X 10.11 -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix panic in diagnose ### Docker Community Edition 18.06.1-ce-mac74 2018-08-29 - -* Upgrades +- Upgrades - [Docker 18.06.1-ce](https://github.com/docker/docker-ce/releases/tag/v18.06.1-ce) -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix local DNS failing to resolve inside containers. ### Docker Community Edition 18.06.0-ce-mac69 2018-07-25 - -* Upgrades +- Upgrades - [Docker 18.06.0-ce](https://github.com/docker/docker-ce/releases/tag/v18.06.0-ce) -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix bug in experimental SOCKS server. See [docker/for-mac#2670](https://github.com/docker/for-mac/issues/2670) - Fix bug in docker login when "Securely store Docker logins in macOS keychain" is unchecked. 
Fixed [docker/for-mac#3104](https://github.com/docker/for-mac/issues/3104) ### Docker Community Edition 18.06.0-ce-rc3-mac68 2018-07-19 - -* Upgrades +- Upgrades - [Docker 18.06.0-ce-rc3](https://github.com/docker/docker-ce/releases/tag/v18.06.0-ce-rc3) - [Docker Machine 0.15.0](https://github.com/docker/machine/releases/tag/v0.15.0) - [Docker compose 1.22.0](https://github.com/docker/compose/releases/tag/1.22.0) -* New +- New - Add an experimental SOCKS server to allow access to container networks, see [docker/for-mac#2670](https://github.com/docker/for-mac/issues/2670#issuecomment-372365274). Also see [docker/for-mac#2721](https://github.com/docker/for-mac/issues/2721) -* Bug fixes and minor changes +- Bug fixes and minor changes - AUFS storage driver is deprecated in Docker Desktop and AUFS support will be removed in the next major release. You can continue with AUFS in Docker Desktop 18.06.x, but you will need to reset disk image (in Preferences > Reset menu) before updating to the next major update. You can check documentation to [save images](/reference/cli/docker/image/save/#examples) and [back up volumes](/manuals/engine/storage/volumes.md#back-up-restore-or-migrate-data-volumes) - Fix startup issue with AUFS [docker/for-mac#2804](https://github.com/docker/for-mac/issues/2804) - Fix status bug which could prevent the Kubernetes cluster from starting. Fixes [docker/for-mac#2990](https://github.com/docker/for-mac/issues/2990) @@ -742,31 +742,28 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus ### Docker Community Edition 18.05.0-ce-mac67 2018-06-07 - -* Upgrades +- Upgrades - [LinuxKit v0.4](https://github.com/linuxkit/linuxkit/releases/tag/v0.4) - Linux Kernel 4.9.93 with CEPH, DRBD, RBD, MPLS_ROUTING and MPLS_IPTUNNEL enabled - [Kubernetes 1.10.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#v1103). If Kubernetes is enabled, the upgrade will be performed automatically when starting Docker Desktop for Mac. -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix VPNKit memory leak. Fixes [moby/vpnkit#371](https://github.com/moby/vpnkit/issues/371) - Fix com.docker.supervisor using 100% CPU. Fixes [docker/for-mac#2967](https://github.com/docker/for-mac/issues/2967), [docker/for-mac#2923](https://github.com/docker/for-mac/issues/2923) - Do not override existing kubectl binary in /usr/local/bin (installed with brew or otherwise). Fixes [docker/for-mac#2368](https://github.com/docker/for-mac/issues/2368), [docker/for-mac#2890](https://github.com/docker/for-mac/issues/2890) - - Detect Vmnetd install error. Fixes [docker/for-mac#2934](https://github.com/docker/for-mac/issues/2934), [docker/for-mac#2687](https://github.com/docker/for-mac/issues/2687) + - Detect Vmnetd install error. Fixes [docker/for-mac#2934](https://github.com/docker/for-mac/issues/2934), [docker/for-mac#2687](https://github.com/docker/for-mac/issues/2687) - Virtual machine default disk path is stored relative to $HOME. 
Fixes [docker/for-mac#2928](https://github.com/docker/for-mac/issues/2928), [docker/for-mac#1209](https://github.com/docker/for-mac/issues/1209) - ### Docker Community Edition 18.05.0-ce-mac66 2018-05-17 - -* Upgrades +- Upgrades - [Docker 18.05.0-ce](https://github.com/docker/docker-ce/releases/tag/v18.05.0-ce) - [Docker compose 1.21.2](https://github.com/docker/compose/releases/tag/1.21.2) -* New +- New - Allow orchestrator selection from the UI in the "Kubernetes" pane, to allow "docker stack" commands to deploy to Swarm clusters, even if Kubernetes is enabled in Docker for Mac. - -* Bug fixes and minor changes + +- Bug fixes and minor changes - Use Simple NTP to minimize clock drift between the virtual machine and the host. Fixes [docker/for-mac#2076](https://github.com/docker/for-mac/issues/2076) - Fix filesystem event notifications for Swarm services and those using the new-style --mount option. Fixes [docker/for-mac#2216](https://github.com/docker/for-mac/issues/2216), [docker/for-mac#2375](https://github.com/docker/for-mac/issues/2375) - Fix filesystem event delivery to Kubernetes pods when the path to the bind mount is a symlink. @@ -777,80 +774,73 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus ### Docker Community Edition 18.05.0-ce-rc1-mac63 2018-04-26 - -* Upgrades +- Upgrades - [Docker 18.05.0-ce-rc1](https://github.com/docker/docker-ce/releases/tag/v18.05.0-ce-rc1) - [Notary 0.6.1](https://github.com/docker/notary/releases/tag/v0.6.1) -* New +- New - Re-enable raw as the default disk format for users running macOS 10.13.4 and higher. Note this change only takes effect after a "reset to factory defaults" or "remove all data" (from the Whale menu > Preferences > Reset). Related to [docker/for-mac#2625](https://github.com/docker/for-mac/issues/2625) -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix Docker for Mac not starting due to socket file paths being too long (typically HOME folder path being too long). Fixes [docker/for-mac#2727](https://github.com/docker/for-mac/issues/2727), [docker/for-mac#2731](https://github.com/docker/for-mac/issues/2731). ### Docker Community Edition 18.04.0-ce-mac62 2018-04-12 - -* Upgrades +- Upgrades - [Docker 18.04.0-ce](https://github.com/docker/docker-ce/releases/tag/v18.04.0-ce) - [Docker compose 1.21.0](https://github.com/docker/compose/releases/tag/1.21.0) ### Docker Community Edition 18.04.0-ce-rc2-mac61 2018-04-09 - -* Upgrades +- Upgrades - [Docker 18.04.0-ce-rc2](https://github.com/docker/docker-ce/releases/tag/v18.04.0-ce-rc2) - [Kubernetes 1.9.6](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.9.md#v196). If Kubernetes is enabled, the upgrade will be performed automatically when starting Docker for Mac. -* New +- New - Enable ceph & rbd modules in LinuxKit virtual machine. -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix upgrade straight from pre-17.12 versions where Docker for Mac cannot restart once the upgrade has been performed. 
Fixes [docker/for-mac#2739](https://github.com/docker/for-mac/issues/2739) ### Docker Community Edition 18.03.0-ce-mac58 2018-03-26 - -* Upgrades +- Upgrades - [Docker 18.03.0-ce](https://github.com/docker/docker-ce/releases/tag/v18.03.0-ce) - [Docker compose 1.20.1](https://github.com/docker/compose/releases/tag/1.20.1) ### Docker Community Edition 18.03.0-ce-rc4-mac57 2018-03-15 - -* Upgrades +- Upgrades - [Docker 18.03.0-ce-rc4](https://github.com/docker/docker-ce/releases/tag/v18.03.0-ce-rc4) - AUFS 20180312 -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix support for AUFS. Fixes [docker/for-win#1831](https://github.com/docker/for-win/issues/1831) - Fix synchronization between CLI `docker login` and GUI login. ### Docker Community Edition 18.03.0-ce-rc3-mac56 2018-03-13 - -* Upgrades +- Upgrades - [Docker 18.03.0-ce-rc3](https://github.com/docker/docker-ce/releases/tag/v18.03.0-ce-rc3) - [Docker Machine 0.14.0](https://github.com/docker/machine/releases/tag/v0.14.0) - [Docker compose 1.20.0-rc2](https://github.com/docker/compose/releases/tag/1.20.0-rc2) - [Notary 0.6.0](https://github.com/docker/notary/releases/tag/v0.6.0) - Linux Kernel 4.9.87 -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix for the HTTP/S transparent proxy when using "localhost" names (for example "host.docker.internal", "docker.for.mac.host.internal", "docker.for.mac.localhost"). - Fix daemon not starting properly when setting TLS-related options. Fixes [docker/for-mac#2663](https://github.com/docker/for-mac/issues/2663) ### Docker Community Edition 18.03.0-ce-rc1-mac54 2018-02-27 - -* Upgrades +- Upgrades - [Docker 18.03.0-ce-rc1](https://github.com/docker/docker-ce/releases/tag/v18.03.0-ce-rc1) -* New +- New - Virtual machine Swap size can be changed in settings. See [docker/for-mac#2566](https://github.com/docker/for-mac/issues/2566), [docker/for-mac#2389](https://github.com/docker/for-mac/issues/2389) - Support NFS Volume sharing. Also works in Kubernetes. -* Bug fixes and minor changes +- Bug fixes and minor changes - Revert the default disk format to qcow2 for users running macOS 10.13 (High Sierra). There are confirmed reports of file corruption using the raw format which uses sparse files on APFS. This change only takes effect after a reset to factory defaults (from the Whale menu -> Preferences -> Reset). Related to [docker/for-mac#2625](https://github.com/docker/for-mac/issues/2625) - DNS name `host.docker.internal` should be used for host resolution from containers. Older aliases (still valid) are deprecated in favor of this one. (See https://tools.ietf.org/html/draft-west-let-localhost-be-localhost-06). - Kubernetes Load balanced services are no longer marked as `Pending`. @@ -859,109 +849,101 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus ### Docker Community Edition 18.02.0-ce-mac53 2018-02-09 - -* Upgrades +- Upgrades - [Docker 18.02.0-ce](https://github.com/docker/docker-ce/releases/tag/v18.02.0-ce) - [Docker compose 1.19.0](https://github.com/docker/compose/releases/tag/1.19.0) -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix update startup failure in some cases. - Fix empty registry added by mistake in some cases in the Preference Daemon Pane. Fixes [docker/for-mac#2537](https://github.com/docker/for-mac/issues/2537) - Clearer error message when incompatible hardware is detected. Diagnostics are not proposed in the error popup in this case. 
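A quick way to sanity-check the `host.docker.internal` name called out in the 18.03.0-ce-rc1-mac54 entry above; a sketch only, assuming the `alpine` image, whose BusyBox `nslookup` is enough to show that the name resolves:

```bash
# Confirm that containers resolve the host through the special DNS name.
docker run --rm alpine nslookup host.docker.internal
```

Any resolver inside the container works the same way; `alpine` and `nslookup` here are only illustrative choices.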
### Docker Community Edition 18.02.0-ce-rc2-mac51 2018-02-02 - -* Upgrades +- Upgrades - [Docker 18.02.0-ce-rc2](https://github.com/docker/docker-ce/releases/tag/v18.02.0-ce-rc2) - [Docker compose 1.19.0-rc2](https://github.com/docker/compose/releases/tag/1.19.0-rc2) - [Kubernetes 1.9.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.9.md#v192). If you have Kubernetes enabled, the upgrade will be performed automatically when starting Docker for Mac. -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix Kubernetes-compose integration update that was causing startup failure. Fixes [docker/for-mac#2536](https://github.com/docker/for-mac/issues/2536) - Fix some cases where selecting "Reset" after an error did not reset properly. - Fix incorrect NTP config. Fixes [docker/for-mac#2529](https://github.com/docker/for-mac/issues/2529) ### Docker Community Edition 18.02.0-ce-rc1-mac50 2018-01-26 - -* Upgrades +- Upgrades - [Docker 18.02.0-ce-rc1](https://github.com/docker/docker-ce/releases/tag/v18.02.0-ce-rc1) -* Bug fixes and minor changes +- Bug fixes and minor changes - Added "Restart" menu item. See [docker/for-mac#2407](https://github.com/docker/for-mac/issues/2407) - Keep any existing kubectl binary when activating Kubernetes in Docker for Mac, and restore it when disabling Kubernetes. Fixes [docker/for-mac#2508](https://github.com/docker/for-mac/issues/2508), [docker/for-mac#2368](https://github.com/docker/for-mac/issues/2368) - Fix Kubernetes context selector. Fixes [docker/for-mac#2495](https://github.com/docker/for-mac/issues/2495) ### Docker Community Edition 18.01.0-ce-mac48 2018-01-19 - -* Upgrades +- Upgrades - [Docker 18.01.0-ce](https://github.com/docker/docker-ce/releases/tag/v18.01.0-ce) - Linux Kernel 4.9.75 -* New +- New - The directory holding the disk images was renamed (from `~/Library/Containers/com.docker.docker/Data/com.docker.driver.amd64-linux` to ~/Library/Containers/com.docker.docker/Data/vms/0`). -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix error during resize/create Docker.raw disk image in some cases. Fixes [docker/for-mac#2383](https://github.com/docker/for-mac/issues/2383), [docker/for-mac#2447](https://github.com/docker/for-mac/issues/2447), [docker/for-mac#2453], (https://github.com/docker/for-mac/issues/2453), [docker/for-mac#2420](https://github.com/docker/for-mac/issues/2420) - Fix additional allocated disk space not available in containers. Fixes [docker/for-mac#2449](https://github.com/docker/for-mac/issues/2449) - VPNkit port max idle time default restored to 300s. Fixes [docker/for-mac#2442](https://github.com/docker/for-mac/issues/2442) - Fix using an HTTP proxy with authentication. Fixes [docker/for-mac#2386](https://github.com/docker/for-mac/issues/2386) - - Allow HTTP proxy excludes to be written as .docker.com as well as *.docker.com + - Allow HTTP proxy excludes to be written as .docker.com as well as \*.docker.com - Allow individual IP addresses to be added to HTTP proxy excludes. - - Avoid hitting DNS timeouts when querying docker.for.mac.* when the upstream DNS servers are slow or missing. + - Avoid hitting DNS timeouts when querying docker.for.mac.\* when the upstream DNS servers are slow or missing. - Fix for `docker push` to an insecure registry. Fixes [docker/for-mac#2392](https://github.com/docker/for-mac/issues/2392) - Separate internal ports used to proxy HTTP and HTTPS content. 
- If kubectl was already installed before Docker For Mac, restore the existing kubectl when switching Kubernetes off in Docker for Mac. - Migration of Docker Toolbox images is not proposed anymore in Docker For Mac installer (still possible to migrate Toolbox images manually). - ### Docker Community Edition 17.12.0-ce-mac45 2018-01-05 - -* Upgrades +- Upgrades - [Docker 17.12.0-ce](https://github.com/docker/docker-ce/releases/tag/v17.12.0-ce) -* New - - Experimental Kubernetes Support. You can now run a single-node Kubernetes cluster from the "Kubernetes" Pane in Docker For Mac Preferences and use kubectl commands as well as docker commands. See [the Kubernetes section](/manuals/desktop/features/kubernetes.md) +- New + - Experimental Kubernetes Support. You can now run a single-node Kubernetes cluster from the "Kubernetes" Pane in Docker For Mac Preferences and use kubectl commands as well as docker commands. See [the Kubernetes section](/manuals/desktop/use-desktop/kubernetes.md) - DNS name `docker.for.mac.host.internal` should be used instead of `docker.for.mac.localhost` (still valid) for host resolution from containers, since since there is an RFC banning the use of subdomains of localhost (See https://tools.ietf.org/html/draft-west-let-localhost-be-localhost-06). -* Bug fixes and minor changes +- Bug fixes and minor changes - The docker engine is configured to use VPNKit as an HTTP proxy, fixing 'docker pull' in environments with no DNS. Fixes [docker/for-mac#2320](https://github.com/docker/for-mac/issues/2320) ## Edge Releases of 2017 ### Docker Community Edition 17.12.0-ce-rc4-mac44 2017-12-21 - -* Upgrades +- Upgrades - [Docker 17.12.0-ce-rc4](https://github.com/docker/docker-ce/releases/tag/v17.12.0-ce-rc4) - [Docker compose 1.18.0](https://github.com/docker/compose/releases/tag/1.18.0) -* Bug fixes and minor changes +- Bug fixes and minor changes - Display actual size used by the virtual machine disk, especially useful for disks using raw format. See [docker/for-mac#2297](https://github.com/docker/for-mac/issues/2297). - Fix more specific edge cases in filesharing settings migration. ### Docker Community Edition 17.12.0-ce-rc3-mac43 2017-12-15 - -* Upgrades +- Upgrades - [Docker 17.12.0-ce-rc3](https://github.com/docker/docker-ce/releases/tag/v17.12.0-ce-rc3) -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix filesharing migration issue ([docker/for-mac#2317](https://github.com/docker/for-mac/issues/2317)) ### Docker Community Edition 17.12.0-ce-rc2-mac41 2017-12-13 -* Upgrades +- Upgrades - [Docker 17.12.0-ce-rc2](https://github.com/docker/docker-ce/releases/tag/v17.12.0-ce-rc2) - [Docker compose 1.18.0-rc2](https://github.com/docker/compose/releases/tag/1.18.0-rc2) -* New +- New - Virtual machine disk size can be changed in settings. (See [docker/for-mac#1037](https://github.com/docker/for-mac/issues/1037)). -* Bug fixes and minor changes +- Bug fixes and minor changes - Avoid virtual machine reboot when changing host proxy settings. - Don't break HTTP traffic between containers by forwarding them through the external proxy [docker/for-mac#981](https://github.com/docker/for-mac/issues/981) - Filesharing settings are now stored in settings.json @@ -971,100 +953,98 @@ This release contains a Kubernetes upgrade. 
Note that your local Kubernetes clus ### Docker Community Edition 17.11.0-ce-mac40 2017-11-22 - -* Upgrades +- Upgrades - [Docker 17.11.0-ce](https://github.com/docker/docker-ce/releases/tag/v17.11.0-ce) ### Docker Community Edition 17.11.0-ce-rc4-mac39 2017-11-17 -* Upgrades +- Upgrades - [Docker 17.11.0-ce-rc4](https://github.com/docker/docker-ce/releases/tag/v17.11.0-ce-rc4) - [Docker compose 1.17.1](https://github.com/docker/compose/releases/tag/1.17.1) - Linux kernel 4.9.60 -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix login into private repository with certificate issue. [docker/for-mac#2201](https://github.com/docker/for-mac/issues/2201) -* New +- New - For systems running APFS on SSD on High Sierra, use `raw` format virtual machine disks by default. This increases disk throughput (from 320MiB/sec to 600MiB/sec in `dd` on a 2015 MacBook Pro) and disk space handling. - Existing disks are kept in qcow format, if you want to switch to raw format you need to "Reset to factory defaults". To query the space usage of the file, use a command like: - `$ cd ~/Library/Containers/com.docker.docker/Data/com.docker.driver.amd64-linux/` - `$ ls -ls Docker.raw` - `3944768 -rw-r--r--@ 1 user staff 68719476736 Nov 16 11:19 Docker.raw` - The first number (`3944768`) is the allocated space in blocks; the larger number `68719476736` is the maximum total amount of space the file may consume in future in bytes. + Existing disks are kept in qcow format, if you want to switch to raw format you need to "Reset to factory defaults". To query the space usage of the file, use a command like: + `$ cd ~/Library/Containers/com.docker.docker/Data/com.docker.driver.amd64-linux/` + `$ ls -ls Docker.raw` + `3944768 -rw-r--r--@ 1 user staff 68719476736 Nov 16 11:19 Docker.raw` + The first number (`3944768`) is the allocated space in blocks; the larger number `68719476736` is the maximum total amount of space the file may consume in future in bytes. ### Docker Community Edition 17.11.0-ce-rc3-mac38 2017-11-09 -* Upgrades +- Upgrades - [Docker 17.11.0-ce-rc3](https://github.com/docker/docker-ce/releases/tag/v17.11.0-ce-rc3) -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix Docker build exits successfully but fails to build image [moby/#35413](https://github.com/moby/moby/issues/35413). ### Docker Community Edition 17.11.0-ce-rc2-mac37 2017-11-02 -* Upgrades +- Upgrades - [Docker 17.11.0-ce-rc2](https://github.com/docker/docker-ce/releases/tag/v17.11.0-ce-rc2) - [Docker compose 1.17.0](https://github.com/docker/compose/releases/tag/1.17.0) - Linuxkit blueprint updated to [linuxkit/linuxkit#2633](https://github.com/linuxkit/linuxkit/pull/2633), fixes CVE-2017-15650 -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix centos:5 & centos:6 images not starting properly with LinuxKit virtual machine (fixes [docker/for-mac#2169](https://github.com/docker/for-mac/issues/2169)). 
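The allocation check from the 17.11.0-ce-rc4-mac39 entry above, collected into one runnable snippet; a sketch that assumes the raw-format disk image sits at its default location:

```bash
# Show how much space the raw-format disk image actually uses (macOS).
cd ~/Library/Containers/com.docker.docker/Data/com.docker.driver.amd64-linux/
ls -ls Docker.raw
# The leading block count is the space actually allocated; the size column is the
# maximum the file may grow to (68719476736 bytes in the example output above).
```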
- ### Docker Community Edition 17.10.0-ce-mac36 2017-10-24 -* Upgrades +- Upgrades - [Docker 17.10.0-ce](https://github.com/docker/docker-ce/releases/tag/v17.10.0-ce) - [Docker Machine 0.13.0](https://github.com/docker/machine/releases/tag/v0.13.0) - [Docker compose 1.17.0-rc1](https://github.com/docker/compose/releases/tag/1.17.0-rc1) -* New +- New - Virtual machine entirely built with Linuxkit ### Docker Community Edition 17.09.0-ce-mac34 2017-10-06 -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix Docker For Mac unable to start in some cases : removed use of libgmp sometimes causing the vpnkit process to die. ### Docker Community Edition 17.09.0-ce-mac31 2017-09-29 -* Upgrades +- Upgrades - [Docker 17.09.0-ce](https://github.com/docker/docker-ce/releases/tag/v17.09.0-ce) - DataKit update (fix instability on High Sierra) -* Bug fixes and minor changes +- Bug fixes and minor changes - Fix password encoding/decoding. May require to re-login to docker cloud after this version is installed. (Fixes:docker/for-mac#2008, docker/for-mac#2016, docker/for-mac#1919, docker/for-mac#712, docker/for-mac#1220). ### Docker Community Edition 17.09.0-ce-rc3-mac30 2017-09-22 -* Upgrades +- Upgrades - [Docker 17.09.0-ce-rc3](https://github.com/docker/docker-ce/releases/tag/v17.09.0-ce-rc3) ### Docker Community Edition 17.09.0-ce-rc2-mac29 2017-09-19 -* Upgrades +- Upgrades - [Docker 17.09.0-ce-rc2](https://github.com/docker/docker-ce/releases/tag/v17.09.0-ce-rc2) - Linux Kernel 4.9.49 - AUFS 20170911 -* Bug fixes and minor changes +- Bug fixes and minor changes - Kernel: Enable TASK_XACCT and TASK_IO_ACCOUNTING [docker/for-mac#1608](https://github.com/docker/for-mac/issues/1608) - Rotate logs in the virtual machine more often ### Docker Community Edition 17.09.0-ce-rc1-mac28 2017-09-07 -* Upgrades +- Upgrades - [Docker 17.09.0-ce-rc1](https://github.com/docker/docker-ce/releases/tag/v17.09.0-ce-rc1) - [Docker compose 1.16.1](https://github.com/docker/compose/releases/tag/1.16.1) - Linux Kernel 4.9.46 -* Bug fixes and minor changes +- Bug fixes and minor changes - VPNKit: change protocol to support error messages reported back from the server ### Docker Community Edition 17.07.0-ce-mac26, 2017-09-01 -* Upgrades +- Upgrades - [Docker 17.07.0-ce](https://github.com/docker/docker-ce/releases/tag/v17.07.0-ce) - [Docker compose 1.16.0](https://github.com/docker/compose/releases/tag/1.16.0) - [Docker Credential Helpers 0.6.0](https://github.com/docker/docker-credential-helpers/releases/tag/v0.6.0) @@ -1080,8 +1060,8 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - Fixed string validation in daemon options (related to [docker/for-mac#1971](https://github.com/docker/for-mac/issues/1971)) - VPNKit: Fixed a bug which causes a socket to leak if the corresponding -TCP connection is idle for more than five minutes (related to -[docker/for-mac#1374](https://github.com/docker/for-mac/issues/1374)) + TCP connection is idle for more than five minutes (related to + [docker/for-mac#1374](https://github.com/docker/for-mac/issues/1374)) ### Docker Community Edition 17.07.0-ce-rc3-mac23, 2017-08-21 @@ -1126,7 +1106,7 @@ TCP connection is idle for more than five minutes (related to - Add daemon options validation - Diagnose can be cancelled & Improved help information. 
Fixes [docker/for-mac#1134](https://github.com/docker/for-mac/issues/1134), [docker/for-mac#1474](https://github.com/docker/for-mac/issues/1474) -- Support paging of Docker Cloud [repositories](../../docker-hub/repos/_index.md) and [organizations](../../admin/organization/orgs.md). Fixes [docker/for-mac#1538](https://github.com/docker/for-mac/issues/1538) +- Support paging of Docker Cloud [repositories](../../docker-hub/repos/_index.md) and [organizations](../../admin/organization/setup/orgs.md). Fixes [docker/for-mac#1538](https://github.com/docker/for-mac/issues/1538) ### Docker Community Edition 17.06.1-ce-mac20, 2017-07-18 @@ -1218,7 +1198,6 @@ TCP connection is idle for more than five minutes (related to - [Docker 17.05.0-ce-rc1](https://github.com/docker/docker/releases/tag/v17.05.0-ce-rc1) - ### Docker Community Edition 17.04.0-ce-mac7, 2017-04-06 **New** @@ -1255,7 +1234,6 @@ TCP connection is idle for more than five minutes (related to - Disk trimming should work as expected - Diagnostics now contains more settings data - ### Docker Community Edition 17.03.1-ce-rc1-mac3, 2017-03-28 **Upgrades** @@ -1390,7 +1368,6 @@ TCP connection is idle for more than five minutes (related to - Fix for swap not being mounted - Fix AUFS xattr delete issue ([docker/docker#30245](https://github.com/docker/docker/issues/30245)) - ### Beta 38 Release Notes (2017-01-20 1.13.0-beta38) **Upgrades** @@ -1613,7 +1590,6 @@ TCP connection is idle for more than five minutes (related to - Improvements to Logging and Diagnostics - osxfs: switched to `libev/kqueue` to improve latency - ### Beta 29.3 Release Notes (2016-11-02 1.12.3-beta29.3) **Upgrades** @@ -1651,7 +1627,7 @@ TCP connection is idle for more than five minutes (related to - Fixed application of system or custom proxy settings over container restart - Increased default ulimit for memlock (fixes [docker/for-mac#801](https://github.com/docker/for-mac/issues/801) ) - Fixed an issue where the Docker status would continue to be - yellow/animated after the VM had started correctly + yellow/animated after the VM had started correctly - osxfs: fixed the prohibition of chown on read-only or mode 0 files (fixes [docker/for-mac#117](https://github.com/docker/for-mac/issues/117), [docker/for-mac#263](https://github.com/docker/for-mac/issues/263), [docker/for-mac#633](https://github.com/docker/for-mac/issues/633) ) ### Beta 28 Release Notes (2016-10-13 1.12.2-rc3-beta28) @@ -1673,179 +1649,179 @@ TCP connection is idle for more than five minutes (related to **Upgrades** -* Docker 1.12.2-rc1 -* Docker Machine 0.8.2 -* Docker compose 1.8.1 -* Kernel vsock driver v7 -* Kernel 4.4.21 -* AUFS 20160912 +- Docker 1.12.2-rc1 +- Docker Machine 0.8.2 +- Docker compose 1.8.1 +- Kernel vsock driver v7 +- Kernel 4.4.21 +- AUFS 20160912 **Bug fixes and minor changes** -* Fixed an issue where some windows did not claim focus correctly -* Added UI when switching channel to prevent user losing containers and settings -* Check disk capacity before Toolbox import -* Import certificates in `etc/ssl/certs/ca-certificates.crt` -* DNS: reduce the number of UDP sockets consumed on the host -* VPNkit: improve the connection-limiting code to avoid running out of sockets on the host -* UDP: handle diagrams bigger than 2035, up to the configured macOS kernel limit -* UDP: made the forwarding more robust; now, drop packets and continue rather than stopping -* disk: made the "flush" behaviour configurable for database-like workloads. This works around a performance regression in `v1.12.1`. 
+- Fixed an issue where some windows did not claim focus correctly +- Added UI when switching channel to prevent user losing containers and settings +- Check disk capacity before Toolbox import +- Import certificates in `etc/ssl/certs/ca-certificates.crt` +- DNS: reduce the number of UDP sockets consumed on the host +- VPNkit: improve the connection-limiting code to avoid running out of sockets on the host +- UDP: handle diagrams bigger than 2035, up to the configured macOS kernel limit +- UDP: made the forwarding more robust; now, drop packets and continue rather than stopping +- disk: made the "flush" behaviour configurable for database-like workloads. This works around a performance regression in `v1.12.1`. ### Beta 26 Release Notes (2016-09-14 1.12.1-beta26) **New** -* Improved support for macOS 10.12 Sierra +- Improved support for macOS 10.12 Sierra **Upgrades** -* Linux kernel 4.4.20 -* AUFS 20160905 +- Linux kernel 4.4.20 +- AUFS 20160905 **Bug fixes and minor changes** -* Fixed communications glitch when UI talks to `com.docker.vmnetd`. Fixes [docker/for-mac#90](https://github.com/docker/for-mac/issues/90) +- Fixed communications glitch when UI talks to `com.docker.vmnetd`. Fixes [docker/for-mac#90](https://github.com/docker/for-mac/issues/90) -* UI fix for macOs 10.12 +- UI fix for macOs 10.12 -* Windows open on top of full screen app are available in all spaces +- Windows open on top of full screen app are available in all spaces -* Reporting a bug, while not previously logged into GitHub now works +- Reporting a bug, while not previously logged into GitHub now works -* When a diagnostic upload fails, the error is properly reported +- When a diagnostic upload fails, the error is properly reported -* `docker-diagnose` displays and records the time the diagnosis was captured +- `docker-diagnose` displays and records the time the diagnosis was captured -* Ports are allowed to bind to host addresses other than `0.0.0.0` and `127.0.0.1`. Fixes issue reported in [docker/for-mac#68](https://github.com/docker/for-mac/issues/68). +- Ports are allowed to bind to host addresses other than `0.0.0.0` and `127.0.0.1`. Fixes issue reported in [docker/for-mac#68](https://github.com/docker/for-mac/issues/68). -* We no longer compute the container folder in `com.docker.vmnetd`. Fixes [docker/for-mac#47](https://github.com/docker/for-mac/issues/47). +- We no longer compute the container folder in `com.docker.vmnetd`. Fixes [docker/for-mac#47](https://github.com/docker/for-mac/issues/47). **Known Issues** -* `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. The -issue is being investigated. The workaround is to restart Docker.app. +- `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. The + issue is being investigated. The workaround is to restart Docker.app. -* There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and -traversals of large directories are currently slow. Additionally, containers -that perform large numbers of directory operations, such as repeated scans of -large directory trees, may suffer from poor performance. More information is -available in [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md) in Troubleshooting. +- There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and + traversals of large directories are currently slow. 
Additionally, containers + that perform large numbers of directory operations, such as repeated scans of + large directory trees, may suffer from poor performance. More information is + available in [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md) in Troubleshooting. -* Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart `Docker.app`. +- Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart `Docker.app`. ### Beta 25 Release Notes (2016-09-07 1.12.1-beta25) **Upgrades** -* Experimental support for macOS 10.12 Sierra (beta) +- Experimental support for macOS 10.12 Sierra (beta) **Bug fixes and minor changes** -* VPNKit supports search domains -* Entries from `/etc/hosts` should now resolve from within containers -* osxfs: fix thread leak +- VPNKit supports search domains +- Entries from `/etc/hosts` should now resolve from within containers +- osxfs: fix thread leak **Known issues** -* Several problems have been reported on macOS 10.12 Sierra and are being -investigated. This includes failure to launch the app and being unable to -upgrade to a new version. +- Several problems have been reported on macOS 10.12 Sierra and are being + investigated. This includes failure to launch the app and being unable to + upgrade to a new version. -* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The -issue is being investigated. The workaround is to restart Docker.app +- Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The + issue is being investigated. The workaround is to restart Docker.app -* There are a number of issues with the performance of directories bind-mounted -with `osxfs`. In particular, writes of small blocks and traversals of large -directories are currently slow. Additionally, containers that perform large -numbers of directory operations, such as repeated scans of large directory -trees, may suffer from poor performance. More information is available in -[Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md) in Troubleshooting. +- There are a number of issues with the performance of directories bind-mounted + with `osxfs`. In particular, writes of small blocks and traversals of large + directories are currently slow. Additionally, containers that perform large + numbers of directory operations, such as repeated scans of large directory + trees, may suffer from poor performance. More information is available in + [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md) in Troubleshooting. -* Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart Docker.app. +- Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart Docker.app. 
### Beta 24 Release Notes (2016-08-23 1.12.1-beta24) **Upgrades** -* Docker 1.12.1 -* Docker Machine 0.8.1 -* Linux kernel 4.4.19 -* AUFS 20160822 +- Docker 1.12.1 +- Docker Machine 0.8.1 +- Linux kernel 4.4.19 +- AUFS 20160822 **Bug fixes and minor changes** -* osxfs: fixed a malfunction of new directories that have the same name as an old directory that is still open +- osxfs: fixed a malfunction of new directories that have the same name as an old directory that is still open -* osxfs: rename events now trigger DELETE and/or MODIFY `inotify` events (saving with TextEdit works now) +- osxfs: rename events now trigger DELETE and/or MODIFY `inotify` events (saving with TextEdit works now) -* slirp: support up to 8 external DNS servers +- slirp: support up to 8 external DNS servers -* slirp: reduce the number of sockets used by UDP NAT, reduce the probability that NAT rules will time out earlier than expected +- slirp: reduce the number of sockets used by UDP NAT, reduce the probability that NAT rules will time out earlier than expected -* The app warns user if BlueStacks is installed (potential kernel panic) +- The app warns user if BlueStacks is installed (potential kernel panic) **Known issues** -* Several problems have been reported on macOS 10.12 Sierra and are being investigated. This includes failure to launch the app and being unable to upgrade to a new version. +- Several problems have been reported on macOS 10.12 Sierra and are being investigated. This includes failure to launch the app and being unable to upgrade to a new version. -* `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app`. +- `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app`. -* There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and traversals of large -directories are currently slow. Additionally, containers that perform large -numbers of directory operations, such as repeated scans of large directory -trees, may suffer from poor performance. For more information and workarounds, see the bullet on performance of bind-mounted directories in [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md) in Troubleshooting. +- There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and traversals of large + directories are currently slow. Additionally, containers that perform large + numbers of directory operations, such as repeated scans of large directory + trees, may suffer from poor performance. For more information and workarounds, see the bullet on performance of bind-mounted directories in [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md) in Troubleshooting. -* Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart `Docker.app`. +- Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart `Docker.app`. 
### Beta 23 Release Notes (2016-08-16 1.12.1-rc1-beta23) **Upgrades** -* Docker 1.12.1-rc1 -* Linux kernel 4.4.17 -* AUFS 20160808 +- Docker 1.12.1-rc1 +- Linux kernel 4.4.17 +- AUFS 20160808 **Bug fixes and minor changes** -* Moby: use default sysfs settings, transparent huge pages disabled -* Moby: cgroup mount to support systemd in containers -* osxfs: fixed an issue that caused `inotify` failure and crashes -* osxfs: fixed a directory fd leak -* Zsh completions +- Moby: use default sysfs settings, transparent huge pages disabled +- Moby: cgroup mount to support systemd in containers +- osxfs: fixed an issue that caused `inotify` failure and crashes +- osxfs: fixed a directory fd leak +- Zsh completions **Known issues** -* Docker for Mac is not supported on macOS 10.12 Sierra +- Docker for Mac is not supported on macOS 10.12 Sierra -* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app +- Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app -* There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and traversals of large directories are currently slow. Additionally, containers that perform large numbers of directory operations, such as repeated scans of large directory trees, may suffer from poor performance. For more information and workarounds, see the bullet on performance of bind-mounted directories in [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md) in Troubleshooting. +- There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and traversals of large directories are currently slow. Additionally, containers that perform large numbers of directory operations, such as repeated scans of large directory trees, may suffer from poor performance. For more information and workarounds, see the bullet on performance of bind-mounted directories in [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md) in Troubleshooting. -* Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart Docker.app +- Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart Docker.app ### Beta 22 Release Notes (2016-08-11 1.12.0-beta22) **Upgrades** -* Linux kernel to 4.4.16 +- Linux kernel to 4.4.16 **Bug fixes and minor changes** -* Increase Moby fs.file-max to 524288 -* Use Mac System Configuration database to detect DNS -* HyperKit updated with dtrace support and lock fixes -* Fix Moby Diagnostics and Update Kernel -* UI Fixes -* osxfs: fix socket chowns +- Increase Moby fs.file-max to 524288 +- Use Mac System Configuration database to detect DNS +- HyperKit updated with dtrace support and lock fixes +- Fix Moby Diagnostics and Update Kernel +- UI Fixes +- osxfs: fix socket chowns **Known issues** -* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app +- Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. 
The workaround is to restart Docker.app -* There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and traversals of large directories are currently slow. Additionally, containers that perform large numbers of directory operations, such as repeated scans of large directory trees, may suffer from poor performance. More information is available in [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). +- There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and traversals of large directories are currently slow. Additionally, containers that perform large numbers of directory operations, such as repeated scans of large directory trees, may suffer from poor performance. More information is available in [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). -* Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart Docker.app +- Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart Docker.app ### Beta 21.1 Release Notes (2016-08-03 1.12.0-beta21.1) @@ -1855,86 +1831,86 @@ events or unexpected unmounts. **Hotfixes** -* osxfs: fixed an issue causing access to children of renamed directories to fail (symptoms: npm failures, apt-get failures) +- osxfs: fixed an issue causing access to children of renamed directories to fail (symptoms: npm failures, apt-get failures) -* osxfs: fixed an issue causing some ATTRIB and CREATE `inotify` events to fail delivery and other `inotify` events to stop +- osxfs: fixed an issue causing some ATTRIB and CREATE `inotify` events to fail delivery and other `inotify` events to stop -* osxfs: fixed an issue causing all `inotify` events to stop when an ancestor directory of a mounted directory was mounted +- osxfs: fixed an issue causing all `inotify` events to stop when an ancestor directory of a mounted directory was mounted -* osxfs: fixed an issue causing volumes mounted under other mounts to spontaneously unmount (docker/docker#24503) +- osxfs: fixed an issue causing volumes mounted under other mounts to spontaneously unmount (docker/docker#24503) ### Docker for Mac 1.12.0 (2016-07-28 1.12.0-beta21) **New** -* Docker for Mac is now available from 2 channels: **stable** and **beta**. New features and bug fixes will go out first in auto-updates to users in the beta channel. Updates to the stable channel are much less frequent and happen in sync with major and minor releases of the Docker Engine. Only features that are well-tested and ready for production are added to the stable channel releases. For downloads of both and more information, see the [installation guide](/manuals/desktop/setup/install/mac-install.md). +- Docker for Mac is now available from 2 channels: **stable** and **beta**. New features and bug fixes will go out first in auto-updates to users in the beta channel. Updates to the stable channel are much less frequent and happen in sync with major and minor releases of the Docker Engine. Only features that are well-tested and ready for production are added to the stable channel releases. For downloads of both and more information, see the [installation guide](/manuals/desktop/setup/install/mac-install.md). 
**Upgrades** -* Docker 1.12.0 with experimental features -* Docker Machine 0.8.0 -* Docker Compose 1.8.0 +- Docker 1.12.0 with experimental features +- Docker Machine 0.8.0 +- Docker Compose 1.8.0 **Bug fixes and minor changes** -* Check for updates, auto-update and diagnose can be run by non-admin users -* osxfs: fixed an issue causing occasional incorrect short reads -* osxfs: fixed an issue causing occasional EIO errors -* osxfs: fixed an issue causing `inotify` creation events to fail -* osxfs: increased the `fs.inotify.max_user_watches` limit in Moby to 524288 -* The UI shows documentation link for sharing volumes -* Clearer error message when running with outdated VirtualBox version -* Added link to sources for qemu-img +- Check for updates, auto-update and diagnose can be run by non-admin users +- osxfs: fixed an issue causing occasional incorrect short reads +- osxfs: fixed an issue causing occasional EIO errors +- osxfs: fixed an issue causing `inotify` creation events to fail +- osxfs: increased the `fs.inotify.max_user_watches` limit in Moby to 524288 +- The UI shows documentation link for sharing volumes +- Clearer error message when running with outdated VirtualBox version +- Added link to sources for qemu-img **Known issues** -* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app +- Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app -* There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks, and traversals of large directories are currently slow. Additionally, containers that perform large numbers of directory operations, such as repeated scans of large directory trees, may suffer from poor performance. For more information and workarounds, see [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md) in [Logs and Troubleshooting](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md). +- There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks, and traversals of large directories are currently slow. Additionally, containers that perform large numbers of directory operations, such as repeated scans of large directory trees, may suffer from poor performance. For more information and workarounds, see [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md) in [Logs and Troubleshooting](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md). -* Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart Docker.app +- Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. 
The workaround is to restart Docker.app ### Beta 20 Release Notes (2016-07-19 1.12.0-rc4-beta20) **Bug fixes and minor changes** -* Fixed `docker.sock` permission issues -* Don't check for update when the settings panel opens -* Removed obsolete DNS workaround -* Use the secondary DNS server in more circumstances -* Limit the number of concurrent port forwards to avoid running out of resources -* Store the database as a "bare" git repo to avoid corruption problems +- Fixed `docker.sock` permission issues +- Don't check for update when the settings panel opens +- Removed obsolete DNS workaround +- Use the secondary DNS server in more circumstances +- Limit the number of concurrent port forwards to avoid running out of resources +- Store the database as a "bare" git repo to avoid corruption problems **Known issues** -* `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker for Mac (`Docker.app`). +- `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker for Mac (`Docker.app`). ### Beta 19 Release Notes (2016-07-14 1.12.0-rc4-beta19) **New** -* Added privacy tab in settings -* Allow the definition of HTTP proxy overrides in the UI +- Added privacy tab in settings +- Allow the definition of HTTP proxy overrides in the UI **Upgrades** -* Docker 1.12.0 RC4 -* Docker Compose 1.8.0 RC2 -* Docker Machine 0.8.0 RC2 -* Linux kernel 4.4.15 +- Docker 1.12.0 RC4 +- Docker Compose 1.8.0 RC2 +- Docker Machine 0.8.0 RC2 +- Linux kernel 4.4.15 **Bug fixes and minor changes** -* Filesystem sharing permissions can only be configured in the UI (no more `/Mac` in moby) -* `com.docker.osx.xhyve.hyperkit`: increased max number of fds to 10240 -* Improved Moby syslog facilities -* Improved file-sharing tab -* `com.docker.slirp`: included the DNS TCP fallback fix, required when UDP responses are truncated -* `docker build/events/logs/stats...` won't leak when iterrupted with Ctrl-C +- Filesystem sharing permissions can only be configured in the UI (no more `/Mac` in moby) +- `com.docker.osx.xhyve.hyperkit`: increased max number of fds to 10240 +- Improved Moby syslog facilities +- Improved file-sharing tab +- `com.docker.slirp`: included the DNS TCP fallback fix, required when UDP responses are truncated +- `docker build/events/logs/stats...` won't leak when interrupted with Ctrl-C **Known issues** -* See [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). +- See [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). ### Beta 18.1 Release Notes (2016-07-07 1.12.0-rc3-beta18.1) @@ -1944,176 +1920,175 @@ events or unexpected unmounts. **Hotfix** -* Fixed issue resulting in error "Hijack is incompatible with use of CloseNotifier", reverts previous fix for `Ctrl-C` during build. +- Fixed issue resulting in error "Hijack is incompatible with use of CloseNotifier", reverts previous fix for `Ctrl-C` during build. **New** -* New host/container file sharing UI -* `/Mac` bind mount prefix is deprecated and will be removed soon +- New host/container file sharing UI +- `/Mac` bind mount prefix is deprecated and will be removed soon **Upgrades** -* Docker 1.12.0 RC3 +- Docker 1.12.0 RC3 **Bug fixes and minor changes** -* VPNKit: Improved scalability as number of network connections increases -* The docker API proxy was failing to deal with some 1.12 features, such as health check. 
+- VPNKit: Improved scalability as number of network connections increases +- The docker API proxy was failing to deal with some 1.12 features, such as health check. **Known issues** -* See [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). +- See [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). ### Beta 18 Release Notes (2016-07-06 1.12.0-rc3-beta18) **New** -* New host/container file sharing UI -* `/Mac` bind mount prefix is deprecated and will be removed soon +- New host/container file sharing UI +- `/Mac` bind mount prefix is deprecated and will be removed soon **Upgrades** -* Docker 1.12.0 RC3 +- Docker 1.12.0 RC3 **Bug fixes and minor changes** -* VPNKit: Improved scalability as number of network connections increases -* Interrupting a `docker build` with Ctrl-C will actually stop the build -* The docker API proxy was failing to deal with some 1.12 features, such as health check. +- VPNKit: Improved scalability as number of network connections increases +- Interrupting a `docker build` with Ctrl-C will actually stop the build +- The docker API proxy was failing to deal with some 1.12 features, such as health check. **Known issues** -* See [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). +- See [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). ### Beta 17 Release Notes (2016-06-29 1.12.0-rc2-beta17) **Upgrades** -* Linux kernel 4.4.14, AUFS 20160627 +- Linux kernel 4.4.14, AUFS 20160627 **Bug fixes and minor changes** -* Documentation moved to [/desktop/mac/](../_index.md) -* Allow non-admin users to launch the app for the first time (using admin creds) -* Prompt non-admin users for admin password when needed in Preferences -* Fixed download links, documentation links -* Fixed "failure: No error" message in diagnostic panel -* Improved diagnostics for networking and logs for the service port openers +- Documentation moved to [/desktop/mac/](../_index.md) +- Allow non-admin users to launch the app for the first time (using admin creds) +- Prompt non-admin users for admin password when needed in Preferences +- Fixed download links, documentation links +- Fixed "failure: No error" message in diagnostic panel +- Improved diagnostics for networking and logs for the service port openers **Known issues** -* See [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). +- See [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). ### Beta 16 Release Notes (2016-06-17 1.12.0-rc2-beta16) **Upgrades** -* Docker 1.12.0 RC2 -* docker-compose 1.8.0 RC1 -* docker-machine 0.8.0 RC1 -* notary 0.3 -* Alpine 3.4 +- Docker 1.12.0 RC2 +- docker-compose 1.8.0 RC1 +- docker-machine 0.8.0 RC1 +- notary 0.3 +- Alpine 3.4 **Bug fixes and minor changes** -* VPNKit: Fixed a regressed error message when a port is in use -* Fixed UI crashing with `NSInternalInconsistencyException` / fixed leak -* HyperKit API: Improved error reporting -* osxfs: fix sporadic EBADF due to fd access/release races (#3683) - +- VPNKit: Fixed a regressed error message when a port is in use +- Fixed UI crashing with `NSInternalInconsistencyException` / fixed leak +- HyperKit API: Improved error reporting +- osxfs: fix sporadic EBADF due to fd access/release races (#3683) **Known issues** -* See [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). 
+- See [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). ### Beta 15 Release Notes (2016-06-10 1.11.2-beta15) **New** -* Registry mirror and insecure registries can now be configured from Preferences -* Virtual machine can now be restarted from Preferences -* `sysctl.conf` can be edited from Preferences +- Registry mirror and insecure registries can now be configured from Preferences +- Virtual machine can now be restarted from Preferences +- `sysctl.conf` can be edited from Preferences **Upgrades** -* Docker 1.11.2 -* Linux 4.4.12, `aufs` 20160530 +- Docker 1.11.2 +- Linux 4.4.12, `aufs` 20160530 **Bug fixes and minor changes** -* Timekeeping in Moby VM improved -* Number of concurrent TCP/UDP connections increased in VPNKit -* Hyperkit: `vsock` stability improvements -* Fixed crash when user is admin +- Timekeeping in Moby VM improved +- Number of concurrent TCP/UDP connections increased in VPNKit +- Hyperkit: `vsock` stability improvements +- Fixed crash when user is admin **Known issues** -* See [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). +- See [Known Issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). ### Beta 14 Release Notes (2016-06-02 1.11.1-beta14) **New** -* New settings menu item, **Diagnose & Feedback**, is available to run diagnostics and upload logs to Docker. +- New settings menu item, **Diagnose & Feedback**, is available to run diagnostics and upload logs to Docker. **Known issues** -* `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode with macOS 10.10. The issue is being investigated. The workaround is to restart `Docker.app`. +- `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode with macOS 10.10. The issue is being investigated. The workaround is to restart `Docker.app`. **Bug fixes and minor changes** -* `osxfs`: now support `statfs` -* **Preferences**: updated toolbar icons -* Fall back to secondary DNS server if primary fails. -* Added a link to the documentation from menu. +- `osxfs`: now support `statfs` +- **Preferences**: updated toolbar icons +- Fall back to secondary DNS server if primary fails. +- Added a link to the documentation from menu. ### Beta 13.1 Release Notes (2016-05-28 1.11.1-beta13.1) **Hotfixes** -* `osxfs`: +- `osxfs`: - Fixed sporadic EBADF errors and End_of_file crashes due to a race corrupting node table invariants - Fixed a crash after accessing a sibling of a file moved to another directory caused by a node table invariant violation -* Fixed issue where Proxy settings were applied on network change, causing docker daemon to restart too often -* Fixed issue where log file sizes doubled on docker daemon restart +- Fixed issue where Proxy settings were applied on network change, causing docker daemon to restart too often +- Fixed issue where log file sizes doubled on docker daemon restart ### Beta 13 Release Notes (2016-05-25 1.11.1-beta13) **New** -* `osxfs`: Enabled 10ms dcache for 3x speedup on a `go list ./...` test against docker/machine. Workloads heavy in file system path resolution (common among dynamic languages and build systems) will have those resolutions performed in amortized constant time rather than time linear in the depth of the path so speedups of 2-10x will be common. +- `osxfs`: Enabled 10ms dcache for 3x speedup on a `go list ./...` test against docker/machine. 
Workloads heavy in file system path resolution (common among dynamic languages and build systems) will have those resolutions performed in amortized constant time rather than time linear in the depth of the path so speedups of 2-10x will be common. -* Support multiple users on the same machine, non-admin users can use the app as long as `vmnetd` has been installed. Currently, only one user can be logged in at the same time. +- Support multiple users on the same machine, non-admin users can use the app as long as `vmnetd` has been installed. Currently, only one user can be logged in at the same time. -* Basic support for using system HTTP/HTTPS proxy in docker daemon +- Basic support for using system HTTP/HTTPS proxy in docker daemon **Known issues** -* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. +- Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. **Bug fixes and minor changes** -* `osxfs`: +- `osxfs`: - setting `atime` and `mtime` of nodes is now supported - Fixed major regression in Beta 12 with ENOENT, ENOTEMPY, and other spurious errors after a directory rename. This manifested as `npm install` failure and other directory traversal issues. - Fixed temporary file ENOENT errors - Fixed in-place editing file truncation error, such as when running `perl -i` -* improved time synchronisation after sleep +- improved time synchronisation after sleep ### Beta 12 Release (2016-05-17 1.11.1-beta12) **Upgrades** -* FUSE 7.23 for osxfs +- FUSE 7.23 for osxfs **Known issues** -* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. +- Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. **Bug fixes and minor changes** -* UI improvements -* Fixed a problem in osxfs where`mkdir` returned EBUSY but directory was created. +- UI improvements +- Fixed a problem in osxfs where`mkdir` returned EBUSY but directory was created. ### Beta 11 Release (2016-05-10 1.11.1-beta11) @@ -2123,16 +2098,16 @@ The `osxfs` file system now persists ownership changes in an extended attribute. **Upgrades** -* docker-compose 1.7.1 (see [changelog](https://github.com/docker/compose/releases/tag/1.7.1)) -* Linux kernel 4.4.9 +- docker-compose 1.7.1 (see [changelog](https://github.com/docker/compose/releases/tag/1.7.1)) +- Linux kernel 4.4.9 **Bug fixes and minor changes** -* Desktop notifications after successful update -* No "update available" popup during install process -* Fixed repeated bind of privileged ports -* `osxfs`: Fixed the block count reported by stat -* Moby (Backend) fixes: +- Desktop notifications after successful update +- No "update available" popup during install process +- Fixed repeated bind of privileged ports +- `osxfs`: Fixed the block count reported by stat +- Moby (Backend) fixes: - Fixed `vsock` half closed issue - Added NFS support - Hostname is now Moby, not Docker @@ -2143,145 +2118,141 @@ The `osxfs` file system now persists ownership changes in an extended attribute. **New** -* Token validation is now done over an actual SSL tunnel (HTTPS). (This should fix issues with antivirus applications.) +- Token validation is now done over an actual SSL tunnel (HTTPS). (This should fix issues with antivirus applications.) 
**Upgrades** -* Docker 1.11.1 +- Docker 1.11.1 **Bug fixes and minor changes** -* UCP now starts again -* Include debugging symbols in HyperKit -* vsock stability improvements -* Addressed glitches in **Preferences** panel -* Fixed issues impacting the “whale menu” -* Fixed uninstall process -* HyperKit vcpu state machine improvements, may improve suspend/resume - +- UCP now starts again +- Include debugging symbols in HyperKit +- vsock stability improvements +- Addressed glitches in **Preferences** panel +- Fixed issues impacting the “whale menu” +- Fixed uninstall process +- HyperKit vcpu state machine improvements, may improve suspend/resume ### Beta 9 Release (2016-04-26 1.11.0-beta9) **New** -* New Preferences window - memory and vCPUs now adjustable -* `localhost` is now used for port forwarding by default.`docker.local` will no longer work as of Beta 9. +- New Preferences window - memory and vCPUs now adjustable +- `localhost` is now used for port forwarding by default.`docker.local` will no longer work as of Beta 9. **Known issues** -* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. +- Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. **Bug fixes and minor changes** -* Fix loopback device naming -* Improved docker socket download and osxfs sequential write by 20% -* `com.docker.osxfs` +- Fix loopback device naming +- Improved docker socket download and osxfs sequential write by 20% +- `com.docker.osxfs` - improved sequential read throughput by up to 20% - improved `readdir` performance by up to 6x - log all fatal exceptions -* More reliable DNS forwarding over UDP and TCP -* UDP ports can be proxied over vsock -* Fixed EADDRINUSE (manifesting as errno 526) when ports are re-used -* Send ICMP when asked to not fragment and we can't guarantee it -* Fixed parsing of UDP datagrams with IP socket options -* Drop abnormally large ethernet frames -* Improved HyperKit logging -* Record virtual machine start and stop events +- More reliable DNS forwarding over UDP and TCP +- UDP ports can be proxied over vsock +- Fixed EADDRINUSE (manifesting as errno 526) when ports are re-used +- Send ICMP when asked to not fragment and we can't guarantee it +- Fixed parsing of UDP datagrams with IP socket options +- Drop abnormally large ethernet frames +- Improved HyperKit logging +- Record virtual machine start and stop events ### Beta 8 Release (2016-04-20 1.11.0-beta8) **New** -* Networking mode switched to VPN compatible by default, and as part of this change the overall experience has been improved: - - `docker.local` now works in VPN compatibility mode - - exposing ports on the Mac is available in both networking modes - - port forwarding of privileged ports now works in both networking modes - - traffic to external DNS servers is no longer dropped in VPN mode - +- Networking mode switched to VPN compatible by default, and as part of this change the overall experience has been improved: -* `osxfs` now uses `AF_VSOCK` for transport giving ~1.8x speedup for large sequential read/write workloads but increasing latency by ~1.3x. `osxfs` performance engineering work continues. 
+* `docker.local` now works in VPN compatibility mode +* exposing ports on the Mac is available in both networking modes +* port forwarding of privileged ports now works in both networking modes +* traffic to external DNS servers is no longer dropped in VPN mode +- `osxfs` now uses `AF_VSOCK` for transport giving ~1.8x speedup for large sequential read/write workloads but increasing latency by ~1.3x. `osxfs` performance engineering work continues. **Known issues** -* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app` +- Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app` **Bug fixes and minor changes** -* Apple System Log now used for most logs instead of direct filesystem logging -* `docker_proxy` fixes -* Merged HyperKit upstream patches -* Improved error reporting in `nat` network mode -* `osxfs` `transfused` client now logs over `AF_VSOCK` -* Fixed a `com.docker.osx.HyperKit.linux` supervisor deadlock if processes exit during a controlled shutdown -* Fixed VPN mode malformed DNS query bug preventing some resolutions - +- Apple System Log now used for most logs instead of direct filesystem logging +- `docker_proxy` fixes +- Merged HyperKit upstream patches +- Improved error reporting in `nat` network mode +- `osxfs` `transfused` client now logs over `AF_VSOCK` +- Fixed a `com.docker.osx.HyperKit.linux` supervisor deadlock if processes exit during a controlled shutdown +- Fixed VPN mode malformed DNS query bug preventing some resolutions ### Beta 7 Release (2016-04-12 1.11.0-beta7) **New** -* Docs are updated per the Beta 7 release -* Use AF_VSOCK for docker socket transport +- Docs are updated per the Beta 7 release +- Use AF_VSOCK for docker socket transport **Upgrades** -* docker 1.11.0-rc5 -* docker-machine 0.7.0-rc3 -* docker-compose 1.7.0rc2 - +- docker 1.11.0-rc5 +- docker-machine 0.7.0-rc3 +- docker-compose 1.7.0rc2 **Known issues** -* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app +- Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app -* If VPN mode is enabled and then disabled and then re-enabled again, `docker ps` will block for 90s +- If VPN mode is enabled and then disabled and then re-enabled again, `docker ps` will block for 90s **Bug fixes and minor changes** -* Logging improvements -* Improve process management +- Logging improvements +- Improve process management ### Beta 6 Release (2016-04-05 1.11.0-beta6) **New** -* Docs are updated per the Beta 6 release -* Added uninstall option in user interface +- Docs are updated per the Beta 6 release +- Added uninstall option in user interface **Upgrades** -* docker 1.11.0-rc5 -* docker-machine 0.7.0-rc3 -* docker-compose 1.7.0rc2 +- docker 1.11.0-rc5 +- docker-machine 0.7.0-rc3 +- docker-compose 1.7.0rc2 **Known issues** -* `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. -The issue is being investigated. The workaround is to restart -`Docker.app`. +- `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. + The issue is being investigated. The workaround is to restart + `Docker.app`. -* If VPN mode is enabled, then disabled and re-enabled again, -`docker ps` will block for 90 seconds. 
+- If VPN mode is enabled, then disabled and re-enabled again, + `docker ps` will block for 90 seconds. **Bug fixes and minor changes** -* Fixed osxfs multiple same directory bind mounts stopping inotify -* Fixed osxfs `setattr` on mode 0 files (`sed` failures) -* Fixed osxfs blocking all operations during `readdir` -* Fixed osxfs mishandled errors which crashed the file system and VM -* Removed outdated `lofs`/`9p` support -* Added more debugging info to logs uploaded by `pinata diagnose` -* Improved diagnostics from within the virtual machine -* VirtualBox version check now also works without VBoxManage in path -* VPN mode now uses same IP range as NAT mode -* Tokens are now verified on port 443 -* Removed outdated uninstall scripts -* Increased default ulimits -* Port forwarding with `-p` and `-P` should work in VPN mode -* Fixed a memory leak in `com.docker.db` -* Fixed a race condition on startup between Docker and networking which can -lead to `Docker.app` not starting on reboot +- Fixed osxfs multiple same directory bind mounts stopping inotify +- Fixed osxfs `setattr` on mode 0 files (`sed` failures) +- Fixed osxfs blocking all operations during `readdir` +- Fixed osxfs mishandled errors which crashed the file system and VM +- Removed outdated `lofs`/`9p` support +- Added more debugging info to logs uploaded by `pinata diagnose` +- Improved diagnostics from within the virtual machine +- VirtualBox version check now also works without VBoxManage in path +- VPN mode now uses same IP range as NAT mode +- Tokens are now verified on port 443 +- Removed outdated uninstall scripts +- Increased default ulimits +- Port forwarding with `-p` and `-P` should work in VPN mode +- Fixed a memory leak in `com.docker.db` +- Fixed a race condition on startup between Docker and networking which can + lead to `Docker.app` not starting on reboot ### Beta 5 Release (2016-03-29 1.10.3-beta5) @@ -2326,8 +2297,8 @@ lead to `Docker.app` not starting on reboot - `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app`. - VPN/Hostnet: In VPN mode, the `-p` option needs to be explicitly of the form -`-p :`. `-p ` and `-P` will not -work yet. + `-p :`. `-p ` and `-P` will not + work yet. **Bug fixes and minor changes** @@ -2343,7 +2314,6 @@ work yet. - Fixed watchdog crash on startup - ### Beta 3 Release (2016-03-15 1.10.3-beta3) **New Features and Upgrades** @@ -2364,7 +2334,6 @@ work yet. - Fixed osxfs chmod on sockets - FixED osxfs EINVAL from `open` using O_NOFOLLOW - - Hypervisor stability fixes, resynced with upstream repository - Hostnet/VPN mode @@ -2372,7 +2341,6 @@ work yet. - Added more verbose logging on errors in `nat` mode - Show correct forwarding details in `docker ps/inspect/port` in `nat` mode - - New lines ignored in token entry field - Feedback mail has app version in subject field @@ -2385,7 +2353,6 @@ work yet. - Fix proxy panics on EOF when decoding JSON - Fix long delay/crash when switching from `hostnet` to `nat` mode - - Logging - Moby logs included in diagnose upload - App version included in logs on startup @@ -2398,14 +2365,12 @@ work yet. 
- Added VPN mode/`hostnet` to Preferences - Added disable Time Machine backups of VM disk image to Preferences - - Added `pinata` configuration tool for experimental Preferences - File System: Added guest-to-guest FIFO and socket file support - Upgraded Notary to version 0.2 - **Bug fixes and minor changes** - Fixed data corruption bug during cp (use of sendfile/splice) @@ -2415,7 +2380,6 @@ work yet. - Stability fixes and tests - Fixed DNS issues when changing networks - - Cleaned up Docker startup code related to Moby - Fixed various problems with linking and dependencies @@ -2432,7 +2396,6 @@ work yet. - Added license agreement - Added MixPanel support - - Added HockeyApp crash reporting - Improve signal handling on task manager - Use ISO timestamps with microsecond precision for logging @@ -2443,17 +2406,13 @@ work yet. - docker-uninstall improvements - Remove docker-select as it's no longer used - - Hypervisor - Added PID file - Networking reliability improvements - - Hostnet - - Fixed port forwarding issue - Stability fixes - Fixed setting hostname - - Fixed permissions on `usr/local` symbolic links diff --git a/content/manuals/desktop/previous-versions/edge-releases-windows.md b/content/manuals/desktop/previous-versions/edge-releases-windows.md index ab7f67c7824..6f483b3bae3 100644 --- a/content/manuals/desktop/previous-versions/edge-releases-windows.md +++ b/content/manuals/desktop/previous-versions/edge-releases-windows.md @@ -144,8 +144,8 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus ### Known issues -- The `clock_gettime64` system call returns `EPERM` rather than `ENOSYS` -in i386 images. To work around this issue, disable `seccomp` by using +- The `clock_gettime64` system call returns `EPERM` rather than `ENOSYS` +in i386 images. To work around this issue, disable `seccomp` by using the `--privileged` flag. See [docker/for-win#8326](https://github.com/docker/for-win/issues/8326). ## Docker Desktop Community 2.3.6.2 @@ -710,7 +710,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - Docker Desktop now supports a configurable user timeout for VMs on slower machines. [docker/for-win#4393](https://github.com/docker/for-win/issues/4393) - Enabled Windows features such as Hyper-V and Containers during installation, thereby reducing the need for another restart after installation. -## Docker Desktop Community 2.1.0.0 +## Docker Desktop Community 2.1.0.0 2019-07-30 @@ -728,14 +728,14 @@ This release contains Kubernetes security improvements. Note that your local Kub #### New - Introduced a new user interface for the Docker Desktop **Settings** menu. - - The **Restart** and **Reset** options are now available on the **Troubleshoot** menu. + - The **Restart** and **Reset** options are now available on the **Troubleshoot** menu. #### Bug fixes and minor changes - Changed the host's kubernetes context to ensure `docker run -v .kube:kube ... kubectl` works. - Restricted the `cluster-admin` role on local Kubernetes cluster to `kube-system` namespace. - Fixed Kubernetes installation with VPNkit subnet. - - Fixed an issue where Docker Desktop restarts when a user logs out of Windows and logs back in, which results in retaining the + - Fixed an issue where Docker Desktop restarts when a user logs out of Windows and logs back in, which results in retaining the exported ports on containers. - Reduced the VM startup time. `swap` is not created every time a virtual machine boots. 
- Fixed a bug which caused Docker Desktop to crash when a user cancels switching the version using Windows User Account Control (UAC) settings. @@ -795,16 +795,16 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus * New - App: Docker CLI plugin to configure, share, and install applications - + - Extend Compose files with metadata and parameters - Reuse the same application across multiple environments (Development/QA/Staging/Production) - Multi-orchestrator installation (Swarm or Kubernetes) - Push/Pull/Promotion/Signing supported for application, with the same workflow as images - Fully CNAB compliant - Full support for Docker Contexts - + - Buildx (Tech Preview): Docker CLI plugin for extended build capabilities with BuildKit - + - Familiar UI from docker build - Full BuildKit capabilities with container driver - Multiple builder instance support @@ -847,7 +847,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus * New - Rebranded UI - + * Bug fixes and minor changes - Kubernetes: use default maximum number of pods for kubelet. [docker/for-mac#3453](https://github.com/docker/for-mac/issues/3453) @@ -860,7 +860,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - [Kubernetes 1.13.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md#v1130) - [Kitematic 0.17.6](https://github.com/docker/kitematic/releases/tag/v0.17.6) - Golang 1.10.6, fixes CVEs: [CVE-2018-16875](https://www.cvedetails.com/cve/CVE-2018-16875), [CVE-2018-16873](https://www.cvedetails.com/cve/CVE-2018-16873) and [CVE-2018-16874](https://www.cvedetails.com/cve/CVE-2018-16874) - + WARNING: If you have an existing Kubernetes cluster created with Docker Desktop, this upgrade will reset the cluster. If you need to back up your Kubernetes cluster or persistent volumes you can use [Ark](https://github.com/heptio/ark). * Bug fixes and minor changes @@ -880,7 +880,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - [Docker compose 1.23.2](https://github.com/docker/compose/releases/tag/1.23.2) * Bug fixes and minor changes - - Compose: Fixed a bug where build context URLs would fail to build on Windows. Fixes [docker/for-win#2918](https://github.com/docker/for-win/issues/2918) + - Compose: Fixed a bug where build context URLs would fail to build on Windows. Fixes [docker/for-win#2918](https://github.com/docker/for-win/issues/2918) ### Docker Community Edition 2.0.0.0-win77 2018-11-14 @@ -891,7 +891,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - [Kitematic 0.17.5](https://github.com/docker/kitematic/releases/tag/v0.17.5) * Bug fixes and minor changes - - Windows Containers: Fix group daemon option settings. Fixes [docker/for-win#2647](https://github.com/docker/for-win/issues/2647) + - Windows Containers: Fix group daemon option settings. Fixes [docker/for-win#2647](https://github.com/docker/for-win/issues/2647) - Windows Containers: Improve host.docker.internal ip resolution - Do not try to update samba share mounts when using Windows containers - Improved dns update too verbose in logs @@ -909,7 +909,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus * Deprecation - Removed support of AUFS - + * Bug fixes and minor changes - LCOW does not anymore need --platform flag on multi-arch images - Better WCOW host.docker.internal resolution on host, don't rewrite it if not modified. 
From [docker/for-win#1976](https://github.com/docker/for-win/issues/1976) @@ -977,13 +977,13 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - [Docker 18.05.0-ce](https://github.com/docker/docker-ce/releases/tag/v18.05.0-ce) - [Docker compose 1.21.2](https://github.com/docker/compose/releases/tag/1.21.2) -* New +* New - Allow orchestrator selection from the UI in the "Kubernetes" pane, to allow "docker stack" commands to deploy to swarm clusters, even if Kubernetes is enabled in Docker for Windows. * Bug fixes and minor changes - Fix restart issue when using Windows fast startup on latest 1709 Windows updates. Fixes [docker/for-win#1741](https://github.com/docker/for-win/issues/1741), [docker/for-win#1741](https://github.com/docker/for-win/issues/1741) - DNS name `host.docker.internal` can be used for host resolution from Windows containers. Fixes [docker/for-win#1976](https://github.com/docker/for-win/issues/1976) - - Fix broken link in diagnostics window. + - Fix broken link in diagnostics window. ### Docker Community Edition 18.05.0-ce-rc1-win63 2018-04-26 @@ -995,7 +995,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - Fix startup issue due to incompatibility with other programs (like Razer Synapse 3). Fixes [docker/for-win#1723](https://github.com/docker/for-win/issues/1723) - Fix Kubernetes hostPath translation for PersistentVolumeClaim (PVC). Previously failing PVCs must be deleted and recreated. Fixes [docker/for-win#1758](https://github.com/docker/for-win/issues/1758) - Fix Kubernetes status when resetting to factory defaults. - + ### Docker Community Edition 18.04.0-ce-win62 2018-04-12 @@ -1009,7 +1009,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - [Docker 18.04.0-ce-rc2](https://github.com/docker/docker-ce/releases/tag/v18.04.0-ce-rc2) - [Kubernetes 1.9.6](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.9.md#v196). If Kubernetes is enabled, the upgrade will be performed automatically when starting Docker for Windows. -* New +* New - Enable ceph & rbd modules in LinuxKit VM. * Bug fixes and minor changes @@ -1094,7 +1094,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - [Docker 18.02.0-ce-rc1](https://github.com/docker/docker-ce/releases/tag/v18.02.0-ce-rc1) * New - - Experimental Kubernetes Support. You can now run a single-node Kubernetes cluster from the "Kubernetes" Pane in Docker for Windows settings and use kubectl commands as well as docker commands. See [the Kubernetes section](/manuals/desktop/features/kubernetes.md). + - Experimental Kubernetes Support. You can now run a single-node Kubernetes cluster from the "Kubernetes" Pane in Docker for Windows settings and use kubectl commands as well as docker commands. See [the Kubernetes section](/manuals/desktop/use-desktop/kubernetes.md). - LCOW containers can now be run next to Windows containers (on Windows RS3 build 16299 and later). Use `--platform=linux` in Windows container mode to run Linux Containers On Windows. Note that LCOW is still experimental, and requires daemon `experimental` option. * Bug fixes and minor changes @@ -2561,7 +2561,7 @@ are working on a solution. 
- Improve layout of About and Settings dialog - Improve Updater - Link to *Help* - - Link to *Send Feeback* + - Link to *Send Feedback* **General** diff --git a/content/manuals/desktop/release-notes.md b/content/manuals/desktop/release-notes.md index 48295917df0..2232a3e36be 100644 --- a/content/manuals/desktop/release-notes.md +++ b/content/manuals/desktop/release-notes.md @@ -16,8 +16,9 @@ aliases: - /mackit/release-notes/ weight: 220 --- + -This page contains information about the new features, improvements, known issues, and bug fixes in Docker Desktop releases. +This page contains information about the new features, improvements, known issues, and bug fixes in Docker Desktop releases. Releases are gradually rolled out to ensure quality control. If the latest version is not yet available to you, allow some time — updates typically become available within a week of the release date. @@ -25,16 +26,1327 @@ Docker Desktop versions older than 6 months from the latest release are not avai For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoot-and-support/faqs/releases.md). -> [!WARNING] +## 4.73.0 + +{{< release-date date="2026-05-11" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.73.0" build_path="/226246/" >}} + +### Updates + +- [Docker Engine v29.4.3](https://docs.docker.com/engine/release-notes/29/#2943) +- [Docker Agent v1.54.0](https://github.com/docker/docker-agent/releases/tag/v1.54.0) + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed `Cmd+Q` (Mac) and `Ctrl+Q` (Windows/Linux) not fully quitting Docker Desktop. Fixes [docker/for-mac#7833](https://github.com/docker/for-mac/issues/7833). +- Fixed a bug where canceling `docker load` left a containerd ref lock held, causing subsequent loads of the same image to fail. +- Fixed an issue where Docker Desktop made unnecessary network requests to `mcp.docker.com` on sign-in when MCP Toolkit was disabled, causing unexpected proxy authentication prompts. +- Fixed an issue where the search input in Gordon's session sidebar would not close if it was left empty. + +#### For Mac + +- Fixed excessive memory usage on Apple Silicon Macs by improving the Linux VM's ability to return freed container memory back to the host OS. +- Fixed a bug where containers received connections with a corrupted source IP when another container had an active outbound connection to an IP in the same subnet range. Fixes [docker/for-mac#7824](https://github.com/docker/for-mac/issues/7824). + +## 4.72.0 + +{{< release-date date="2026-05-06" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.72.0" build_path="/225998/" >}} + +### New + +- The **Logs** view is now generally available. +- New installations of Docker Desktop for Windows have a choice between per-user (Beta) or all-user installs. + +### Updates + +- [Docker Agent v1.50.0](https://github.com/docker/docker-agent/releases/tag/v1.50.0) +- [Docker DHI (`dhictl`) v0.0.3](https://github.com/docker-hardened-images/dhictl/releases/tag/v0.0.3) +- [Docker Model Runner v1.1.37](https://github.com/docker/model-cli/releases/tag/v1.1.37) +- [credential helpers v0.9.6](https://github.com/docker/docker-credential-helpers/releases/tag/v0.9.6) + +### Security + +- The Extensions settings page now includes a security notice that extensions run with host-level privileges and are not audited by Docker. 
+- [Fixed CVE-2026-31431 ("copy.fail")](https://xint.io/blog/copy-fail-linux-distributions) by backporting an upstream Linux kernel patch that prevents an unprivileged container user from gaining root inside the container via a controlled write into the host VM page cache. + +### Bug fixes and enhancements + +#### For all platforms + +- Improvements to Docker Offload idle notifications. +- Fixed the **Open Gordon in TUI** button not working due to a missing `run` subcommand in Docker Agent command arguments. +- Fixed an issue where transient network errors or Docker Hub server errors during sign-in would unexpectedly sign users out instead of retrying automatically. +- Improved data refresh for the Containers, Images, and Volumes screens by fetching up-to-date data on demand when navigating to those screens, reducing background polling load. +- Fixed a kernel crash that could occur when changing filesharing technology after significant container file activity. +- Enable the OpenAI Responses API (`/responses`) endpoint in Docker Model Runner. +- Fixed a bug where users were unexpectedly signed out of Docker Desktop mid-flow when signing in via `docker login` using OAuth. + +#### For Windows + +- Fixed a bug on Windows where selecting the Docker Desktop taskbar icon multiple times could spawn multiple backend processes. Re-selecting the icon while Docker Desktop is running now brings the dashboard to focus. +- Fixed a race condition on Windows that caused a false-positive "processes still running" dialog to appear when Docker Desktop starts or exits normally. + +### For Linux + +- Support for RHEL 8 has been dropped. + +## 4.71.0 + +{{< release-date date="2026-04-27" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.71.0" build_path="/225177/" >}} + +> [!IMPORTANT] +> +> Support for RHEL 8 has ended. Installing Docker Desktop will require RHEL 9 or RHEL 10 in the next release. + +### Updates + +- [Docker Model Runner v1.1.36](https://github.com/docker/model-runner/releases/tag/v1.1.36) +- [containerd to v2.2.3](https://github.com/containerd/containerd/releases/tag/v2.2.3) +- [Runc v1.3.5](https://github.com/opencontainers/runc/releases/tag/v1.3.5) +- [Docker Compose v5.1.3](https://github.com/docker/compose/releases/tag/v5.1.3) +- [Docker Agent v1.44.0](https://github.com/docker/docker-agent/releases/tag/v1.44.0) +- [Docker Engine v29.4.1](/manuals/engine/release-notes/29.md#2941) + +### Bug fixes and enhancements + +#### For all platforms + +- Docker Model Runner is now disabled by default and must be explicitly enabled in **Settings**. When enabled, TCP host-side support is automatically active. +- Fixed an issue where downloading a Docker Desktop update would fail without a clear error if the disk had insufficient free space. +- Fixed an issue where Docker Scout tag recommendations, when inspecting an image, failed when the base image digest or repository name was empty. +- Added a **Switch to local Docker context** button on the sign-in screen, allowing users in a cloud context to switch back to their local context without signing in. +- Added a dedicated **Stopped** status screen for the cloud engine so users see a clear stopped state instead of an error screen when transitioning away from Docker Offload. + +#### For Mac + +- Fixed an issue where error tracking would temporarily continue sending session data directly after a user disabled analytics. Fixes [docker/for-mac#7768](https://github.com/docker/for-mac/issues/7768). 
+ +#### For Windows + +- Fixed a critical issue where Docker Desktop Dashboard failed to open with `ERR_FAILED` errors caused by process hardening policies conflicting with Chromium. +- Fixed a bug where Kubernetes could fail to start on WSL 2 when `HTTP_PROXY` environment variables are set in WSL 2 itself. +- Fixed a bug in Enhanced Container Isolation (ECI) that was causing loss of container `rootfs` persistence across Docker Desktop restarts, when using WSL. + +## 4.70.0 + +{{< release-date date="2026-04-20" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.70.0" build_path="/224270/" >}} + +### New + +- Added a CLI hint that surfaces the **Logs** view when running `logs`, `compose logs`, `compose attach`, or `compose up` commands, giving you quick access to logs across all running containers. Available with the **Logs** (Beta) feature enabled. + +### Updates + +- [Docker Compose v5.1.2](https://github.com/docker/compose/releases/tag/v5.1.2) +- [Docker Engine v29.4.0](/manuals/engine/release-notes/29.md#2940) +- [Docker Agent v1.43.0](https://github.com/docker/docker-agent/releases/tag/v1.43.0) +- [Docker Model Runner v1.1.33](https://github.com/docker/model-runner/releases/tag/v1.1.33) +- [Docker Scout CLI v1.20.4](https://github.com/docker/scout-cli/releases/tag/v1.20.4) + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed a bug where `docker login` could fail silently in CI environments due to slow Docker Hub responses causing credential store update timeouts. +- Fixed an issue where disabling Beta features also disabled Docker Model Runner. +- Fixed `docker desktop start` causing the Docker AI agent API daemon to fail due to an inherited CLI plugin environment variable. + +#### For Mac + +- Fixed a crash loop where Docker Desktop repeatedly failed to start with exit status `42` after an update due to a corrupted `DockerAppLaunchPath` setting. +- Fixed an issue where a failed update could leave Docker Desktop in a broken state. The installer now automatically reverts to the previous version and shows a clear error message. +- Fixed a bug where stopping one container could disrupt active Unix socket forwards belonging to other running containers. + +#### For Windows + +- Fixed an issue where a failed update could leave Docker Desktop in a broken state. The installer now automatically reverts to the previous version and shows a clear error message. +- Fixed a bug where a failed switch to Windows containers could leave Docker Desktop in a broken state, requiring a restart. +- Fixed an issue where Docker Desktop failed to launch for users with `DEVHOME` set in their environment. +- Temporarily rolled back process hardening that caused Electron crashes on Windows. Fixes [docker/desktop-feedback#245](https://github.com/docker/desktop-feedback/issues/245). 
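The `docker login` CI fix above concerns non-interactive sign-ins of the kind sketched below; the environment variable names are illustrative placeholders:

```console
$ echo "$DOCKER_HUB_TOKEN" | docker login -u "$DOCKER_HUB_USER" --password-stdin
```

Pipelines that authenticate this way were the ones affected when slow Docker Hub responses caused the credential store update to time out.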
+ +## 4.69.0 + +{{< release-date date="2026-04-13" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.69.0" build_path="/224084/" >}} + +### Updates + +- [Docker Agent v1.42.0](https://github.com/docker/docker-agent/releases/tag/v1.42.0) +- [Docker Model v1.1.29](https://github.com/docker/model-runner/releases/tag/v1.1.29) +- [containerd v2.2.2](https://github.com/containerd/containerd/releases/tag/v2.2.2) +- [Docker Buildx v0.33.0](https://github.com/docker/buildx/releases/tag/v0.33.0) + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed an issue where `docker logout` from the CLI was ignored by Docker Desktop when OAuth tokens remained in the credential store, leaving the user unexpectedly signed in. +- Fixed an issue where Docker Desktop could unexpectedly sign users out when unrelated credential updates, `docker login`, or transient network errors triggered a sign-out. +- Fixed a data loss issue where backup data could be deleted during a failed restore operation, leaving users with no data. +- Fixed an issue where sign-in credentials (`login-info.json`) could be included in diagnostic bundles, improving privacy and security. Note that this file contains an encoded organisation(s) name, plan name, encoded username, and encoded email only. No passwords or credentials are included. +- Fixed the footer update label incorrectly showing **Downloading** during the prepare/unpack phase of an update. It now correctly displays **Preparing**. +- Fixed an issue where Docker Desktop would not start when the internal storage disk was full. + +#### For Mac + +- Fixed an issue where the in-app update button was not disabled when `Docker.app` was installed in a non-user-writable directory, preventing failed update attempts. +- Fixed update failure for users who installed Docker Desktop via Homebrew on Mac. + +#### For Windows + +- Fixed an unexpected WSL terminal popup appearing for Windows users using the Hyper-V backend during Docker Desktop installation or uninstallation. +- Fixed an issue on Windows where factory reset deleted CLI plugins from `~/.docker/cli-plugins`, causing `docker build` to fall back to the legacy builder. +- Fixed a bug where Kubernetes failed to start when WSL integration was enabled alongside another distro using cgroup v1 controllers. +- Fixed a race condition that caused Kubernetes to fail to start when a Registry Access Management policy change occurred during startup. +- Prevent Docker Desktop from fatally failing due to transient 'Access is denied' errors during file operations on Windows. + +## 4.68.0 + +{{< release-date date="2026-04-07" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.68.0" build_path="/223695/" >}} + +### New + +- Gordon now has persistent local memory, allowing it to remember your preferences and context across sessions. + +### Updates + +- [Docker Agent v1.39.0](https://github.com/docker/docker-agent/releases/tag/v1.39.0) +- [Docker Model v1.1.28](https://github.com/docker/model-runner/releases/tag/v1.1.28) +- [Docker Offload v0.5.81](https://github.com/docker/cloud/releases/tag/v0.5.81) + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed a deadlock in Enhanced Container Isolation that caused containers to hang indefinitely during creation when ECI was enabled. +- Added a warning banner to alert when an MCP server is community-provided and has not been verified by Docker. 
+- Added a persistent **Show timestamps** toggle to the **Logs** view, allowing timestamps to be hidden in both table and visualiser views across sessions. +- Fixed an issue where Docker Desktop frontend processes were not properly terminated on quit. +- Fixed a deadlock when settings controlled by admins reload that could cause Docker Desktop to become unresponsive during sign in or sign out operations. +- Fixed a bug where Docker Desktop could fail to start due to uncorrectable filesystem errors on the disk image not being repaired. +- Fixed a bug that caused Enhanced Container Isolation (ECI) to inadvertently block startup of Kubernetes clusters. +- Fixed an issue where a failed volume size fetch could make the **Volumes** view inaccessible; container counts on volumes now correctly exclude bind mounts. +- Fixed race conditions in volume backup that could cause containers to be incorrectly restarted, export logs to be corrupted, or runtime panics when scheduling tasks. +- Fixed a crash in the API cache that occurred when containers with no names caused a panic disrupting container listing. +- Fixed a bug where starting a container could fail with `ENOENT` if a bind-mount parent directory was deleted while no container was using it. + +#### For Mac + +- Fixed a security vulnerability where tampered user-deployed config profiles could bypass organization sign-in enforcement. +- Fixed a bug where a failed `vmnetd` handshake could dispatch a bogus command on a broken connection, causing unexpected networking errors. +- Fixed a bug where the Docker Desktop Dashboard could be prematurely displayed when restoring to a fullscreen state on launch. +- Fixed nested bind mounts showing empty child mount content on VirtioFS when using Docker Compose with multiple services sharing a volume. Fixes [docker/desktop-feedback#264](https://github.com/docker/desktop-feedback/issues/264). + +#### For Windows + +- Fixed an issue where the installer extraction did not update the progress bar and could take around 5 minutes, depending on the machine. Extraction is now ~60% faster and includes proper progress updates. +- Fixed a race condition where container ports would sometimes not be published correctly after container start, affecting ephemeral ports, `--publish-all`, and gateway IP bindings. +- Fixed an issue where a failed WSL distro move could leave the distro unregistered. + +## 4.67.0 + +{{< release-date date="2026-03-30" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.67.0" build_path="/222858/" >}} + +### New + +- Docker MCP Toolkit now has MCP profile template cards and an onboarding tour accessible via the **Profiles** tab. + +### Updates + +- [Docker Compose v5.1.1](https://github.com/docker/compose/releases/tag/v5.1.1) +- [Docker Agent v1.34.0](https://github.com/docker/docker-agent/releases/tag/v1.34.0) +- [Docker Scout CLI v1.20.3](https://github.com/docker/scout-cli/releases/tag/v1.20.3) +- [Docker Model v1.1.25](https://github.com/docker/model-runner/releases/tag/v1.1.25) + +### Bug fixes and minor changes + +#### For all platforms + +- Docker Model Runner now supports Qwen3.5. +- With the new **Logs (Beta)** view, you can now filter container logs by Compose stack. +- Improved interaction with **Settings** while the Docker engine or Kubernetes is starting or stopping. +- Fixed a bug where random UDP port bindings reported port `0` instead of the actual assigned port. 
+- Fixed an issue with the Docker Desktop shortcut not reopening the Dashboard when Docker Desktop was already running. +- Fixed an issue where the **Add to existing profile** dialog showed profiles that already contained all selected MCP servers in the dropdown. + +#### For Mac + +- Fixed intermittent `exec format error` when starting amd64 containers on Apple Silicon Macs due to a race condition between Rosetta `binfmt` registration and `virtiofs` device availability. + +#### For Windows + +- Fixed Hyper-V being silently re-enabled on every EXE upgrade for WSL 2 users. +- Fixed an MSI installer bug where Docker Desktop processes could be left running after uninstall. +- Fixed an issue on Windows where installations or updates using `--installation-dir` would fail due to the installer archive being extracted into the custom installation directory. +- Improved Docker Desktop startup time on Windows by several seconds when using WSL 2. +- Fixed a bug on the **Models** > **Logs** screen which caused `docker-model` processes to accumulate on Windows each time the screen was visited. + +### Security + +- Addressed [CVE-2026-33990](https://www.cve.org/cverecord?id=CVE-2026-33990), SSRF in Docker Model Runner OCI Registry Client + +## 4.66.1 + +{{< release-date date="2026-03-26" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.66.1" build_path="/222799/" >}} + +### Updates + +- [Docker Engine v29.3.1](/manuals/engine/release-notes/29.md#2931) + +## 4.66.0 + +{{< release-date date="2026-03-23" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.66.0" build_path="/222299/" >}} + +### Updates + +- [Docker Engine v29.3.0](https://docs.docker.com/engine/release-notes/29/#2930) +- [NVIDIA Container Toolkit v1.19.0](https://github.com/NVIDIA/nvidia-container-toolkit/releases/tag/v1.19.0) + +### Bug fixes and minor changes + +#### For all platforms + +- Gordon improvements: + - Provides pre-filled prompts when deeplinking from command-line failure hints. + - Prevents Docker Hub rate limiting by authenticating before making requests. +- Fixed a Kubernetes pod discovery hang when the kube context is broken or unreachable. +- Fixed a terminal crash caused by an undefined dimensions error during terminal resize. +- Fixed volume backup export error handling for file, image, and registry export operations. + +#### For Windows + +- Fixed high CPU usage in the Windows API proxy caused by unnecessary process enumeration. +- Fixed the Windows MSI installer failing to update Docker Desktop. Versions between 4.56 and 4.65 need to uninstall before reinstalling version 4.66 or later. Note that uninstalling removes all associated data. + +## 4.65.0 + +{{< release-date date="2026-03-16" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.65.0" build_path="/221669/" >}} + +### New + +- Added a new **Logs** view where you can explore logs from all sources in one unified view. (Beta) +- Gordon hints now appear when `docker build`, `docker run`, or `docker compose` commands fail, offering contextual suggestions. +- Community MCP servers now support OAuth authentication directly in the UI. +- Added the [`docker dhi` CLI plugin](https://github.com/docker-hardened-images/dhictl) for managing Docker Hardened Images. 
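  To explore what the new plugin offers, the standard Docker CLI plugin help entry point is the safest starting place; its subcommands aren't listed here because they may change between releases:

  ```bash
  # List the commands provided by the Docker Hardened Images CLI plugin.
  docker dhi --help
  ```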
+ +### Updates + +- [Docker Scout CLI v1.20.1](https://github.com/docker/scout-cli/releases/tag/v1.20.1) +- [Docker Agent v1.29.0](https://github.com/docker/docker-agent/releases/tag/v1.29.0) +- [Docker Buildx v0.32.1](https://github.com/docker/buildx/releases/tag/v0.32.1) + +### Bug fixes and minor changes + +#### For all platforms + +- Kubernetes now defaults to kind for new clusters. +- Fixed update progress bar not resuming correctly. + +#### For Windows + +- Improved startup time by skipping docker-users group check when using WSL2 backend. + +### Known issues + +- The Windows MSI installer cannot update an existing Docker Desktop installation when the current version is between 4.56 and 4.65. As a workaround, uninstall the existing version before reinstalling the latest version. Note that uninstalling removes all associated data. + +## 4.64.0 + +{{< release-date date="2026-03-11" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.64.0" build_path="/221278/" >}} + +### Updates + +- [Docker Compose v5.1.0](https://github.com/docker/compose/releases/tag/v5.1.0) +- [Docker Scout CLI v1.20.0](https://github.com/docker/scout-cli/releases/tag/v1.20.0) +- [Docker Agent v1.27.1](https://github.com/docker/docker-agent/releases/tag/v1.27.1) + +### Bug fixes and minor changes + +#### For all platforms + +- Fixed a bug in MCP Toolkit where disabling all tools in a profile would enable all tools. +- Fixed the `docker ai` command stopping after a Docker Agent update. +- Fixed Gordon session title flickering when hover buttons appeared. +- Improved Gordon summary rendering and reduced narrative verbosity. +- Fixed a bug where `docker ai` CLI commands did not correctly shell out to Docker Agent. +- Fixed the **OAuth** tab in Docker MCP Toolkit not showing entries from all catalogs. +- Improved MCP Catalog search. +- Fixed the **Build logs** tab not retaining search terms and filters when switching tabs. +- Fixed Kind container startup to be more reliable. + +#### For Mac + +- Improved update error reporting with more descriptive diagnostics. +- Improved update reliability by preparing the updated `Docker.app` under `Application Support` instead of `/tmp`. + +### Known issues + +- The Windows MSI installer cannot update an existing Docker Desktop installation when the current version is between 4.56 and 4.65. As a workaround, uninstall the existing version before reinstalling the latest version. Note that uninstalling removes all associated data. + +## 4.63.0 + +{{< release-date date="2026-03-02" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.63.0" build_path="/220185/" >}} + +### New + +- Added SLSA v1 provenance support in the **Builds** view. + +### Updates + +- [Kubernetes v1.34.3](https://github.com/kubernetes/kubernetes/releases/tag/v1.34.3) +- Linux kernel `v6.12.72` + +### Bug fixes and minor changes + +#### For all platforms + +- Enhanced the proxy settings UI and added a separate proxy for containers. +- Fixed an issue where community registry MCP catalogs failed to load when a server's config object contained `"required": null`. +- Fixed an issue where `mcp-gateway` would hang when fetching secrets from the Secrets Engine while the Docker Desktop VM was in Resource Saver mode. +- Rebranded "Docker AI" references to "Gordon". + +#### For Windows + +- Improved startup time on Windows. 
+ +### Known issues + +- The Windows MSI installer cannot update an existing Docker Desktop installation when the current version is between 4.56 and 4.65. As a workaround, uninstall the existing version before reinstalling the latest version. Note that uninstalling removes all associated data. + +## 4.62.0 + +{{< release-date date="2026-02-23" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.62.0" build_path="/219486/" >}} + +### New + +- With Docker MCP Toolkit, you can now use [profiles](/manuals/ai/mcp-catalog-and-toolkit/profiles.md) to organize your MCP servers into named collections. You can also create custom catalogs — curated collections of servers for your team or organization. + +### Updates + +- Linux kernel `v6.12.69` + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed an issue where background update checks did not respect the **Automatically check for updates** setting when disabled. Fixes [docker/for-mac#3908](https://github.com/docker/for-mac/issues/3908). + +#### For Mac + +- Added support for vLLM Metal in Docker Model Runner. + +#### For Linux + +- Fixed a networking crash on QEMU 10.2.0 and later. + +### Security + +- Addressed [CVE-2026-2664](https://www.cve.org/cverecord?id=CVE-2026-2664), out of bounds read in grpcfuse kernel module. +- Addressed [CVE-2026-28400](https://www.cve.org/cverecord?id=CVE-2026-28400), runtime flag injection in Docker Model Runner. + +### Known issues + +- The Windows MSI installer cannot update an existing Docker Desktop installation when the current version is between 4.56 and 4.65. As a workaround, uninstall the existing version before reinstalling the latest version. Note that uninstalling removes all associated data. + +## 4.61.0 + +{{< release-date date="2026-02-18" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.61.0" build_path="/219004/" >}} + +### New + +- You can now customize the left-hand navigation to show only the tabs that matter to you, and hide the ones that don’t. + +### Updates + +- Linux kernel `v6.12.68` +- [Docker Engine v29.2.1](https://docs.docker.com/engine/release-notes/29/#2921) +- Docker Sandbox `v0.12.0` + +### Bug fixes and enhancements + +#### For all platforms + +- Docker Sandboxes: + - Added automated image caching to prevent re-downloading images unnecessarily. + - Added Shell mode for blank coding agent sandboxes. + - Added support for OpenCode. + - Added support for mounting multiple workspaces. + - Added experimental Linux support (single user only, UID 1000). + - Added support for running in WSL 2. + - Sandboxes now start in the current working directory if no path is provided. + +### Known issues + +- The Windows MSI installer cannot update an existing Docker Desktop installation when the current version is between 4.56 and 4.65. As a workaround, uninstall the existing version before reinstalling the latest version. Note that uninstalling removes all associated data. + +## 4.60.1 + +{{< release-date date="2026-02-10" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.60.1" build_path="/218372/" >}} + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed a rare issue that crashed the Docker Desktop Dashboard after sign-in. + +## 4.60.0 + +{{< release-date date="2026-02-09" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.60.0" build_path="/218231/" >}} + +### New + +- Added a new `docker desktop diagnose` command to gather diagnostics. 
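  As a minimal illustration of the new command (run from a terminal on a machine with Docker Desktop installed; any additional flags it accepts are not shown here):

  ```bash
  # Gather Docker Desktop diagnostics from the command line.
  docker desktop diagnose
  ```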
+ +### Bug fixes and enhancements + +#### For all platforms + +- Fixed `ping6 host.docker.internal`. +- Enabled landlock LSM. +- Docker Sandboxes improvements: + - Improved agent system prompt with network access documentation + - Fixed Gemini API key injection + - Sandboxes now block `console.anthropic.com/claude.ai` in proxy default rules + - Fix CLI help text for `run --help` + - Improved terminal size handling + +### Known issues + +- The Windows MSI installer cannot update an existing Docker Desktop installation when the current version is between 4.56 and 4.65. As a workaround, uninstall the existing version before reinstalling the latest version. Note that uninstalling removes all associated data. + +## 4.59.1 + +{{< release-date date="2026-02-03" >}} + +{{< desktop-install-v2 mac=true version="4.59.1" build_path="/217750/" >}} + +### Bug fixes and enhancements + +#### For Mac + +- Fixed an issue where CPU usage could spike at regular intervals. Fixes [docker/for-mac#7839](https://github.com/docker/for-mac/issues/7839). + +## 4.59.0 + +{{< release-date date="2026-02-02" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.59.0" build_path="/217644/" >}} + +### Updates + +- Linux kernel `v6.12.67` +- [Docker Compose v5.0.2](https://github.com/docker/compose/releases/tag/v5.0.2) +- Docker Sandbox `v0.10.1` +- [Docker Buildx v0.31.1](https://github.com/docker/buildx/releases/tag/v0.31.1) + +### Bug fixes and enhancements + +#### For all platforms + +- Added Neo4j as a known publisher to the Docker MCP Catalog. +- Fixed an issue where the **Models** tab would crash when displaying requests made via the Anthropic Messages API. + +#### For Mac + +- Fixed an issue where shared file permissions could be unintentionally modified when using DockerVMM. Fixes [docker/for-mac#7830](https://github.com/docker/for-mac/issues/7830). + +#### For Windows + +- Fixed an issue where container secrets injection could fail with `docker-pass`. +- Temporarily disabled VHDX compaction for the WSL data disk to improve stability. + +### Security + +- Fixed a security issue in enhanced container isolation where Docker socket mount permissions could be bypassed when using the `--use-api-socket` flag. + +## 4.58.1 + +{{< release-date date="2026-01-29" >}} + +{{< desktop-install-v2 mac=true version="4.58.1" build_path="/217134/" >}} + +### Bug fixes and enhancements + +#### For Mac + +- Fixed an issue where CPU usage could spike at regular intervals. Fixes [docker/for-mac#7839](https://github.com/docker/for-mac/issues/7839). + +## 4.58.0 + +{{< release-date date="2026-01-26" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.58.0" build_path="/216728/" >}} + +### New + +- A new version of [Docker Sandboxes](/manuals/ai/sandboxes/_index.md) is now available on Docker Desktop. It provides a secure, isolated, microVM-based environment for running coding agents. + +### Updates + +- Linux kernel `v6.12.65` +- [Credential helpers v0.9.5](https://github.com/docker/docker-credential-helpers/releases/tag/v0.9.5) +- [Docker Engine v29.1.5](https://docs.docker.com/engine/release-notes/29/#2915) + +### Bug fixes and enhancements + +#### For all platforms + +- Docker Model Runner now exposes an [Anthropic-compatible API](/manuals/ai/model-runner/api-reference.md#anthropic-compatible-api). +- Docker Desktop now supports UTF-8 BOM for `admin-settings.json` and `registry.json`. +- Fixed an issue where admin settings incorrectly changed user proxy settings after a restart. 
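  As a rough sketch of calling the Anthropic-compatible API mentioned above: the port below matches Docker Model Runner's usual TCP endpoint, but the exact route, the model name, and whether TCP host access is enabled in your settings are assumptions here; the linked API reference is authoritative.

  ```bash
  # Hypothetical request against Docker Model Runner's Anthropic-compatible API.
  # The port, path, and model name are assumptions; check the Model Runner API
  # reference for the exact endpoint exposed by your Docker Desktop version.
  curl http://localhost:12434/v1/messages \
    -H "Content-Type: application/json" \
    -d '{
      "model": "ai/smollm2",
      "max_tokens": 128,
      "messages": [{"role": "user", "content": "Say hello in one sentence."}]
    }'
  ```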
+ +> [!IMPORTANT] +> +> Starting with Docker Desktop version 4.59, installing an update from the tray menu will proceed without opening the Docker Desktop Dashboard. + +#### For Mac + +- Fixed a bug where shared file permissions could be modified inadvertently while using DockerVMM on macOS. Fixes [docker/for-mac#7830](https://github.com/docker/for-mac/issues/7830). + +#### For Windows + +- Fixed an issue where the installer failed because of special ACLs set on `ProgramData`. + + +### Security + +- Updated Kubernetes images to address CVEs. + - Kind: + - `docker/desktop-containerd-registry-mirror:v0.0.3` + - `docker/desktop-cloud-provider-kind:v0.5.0` + - Kubeadm: + - `docker/desktop-vpnkit-controller:v4.0` + - `docker/desktop-storage-provisioner:v3.0` +- The `kind` dependency image `envoyproxy/envoy` was upgraded from v1.32.6 to v1.36.4. If you mirror `kind` images, ensure your mirrors are updated. + +## 4.57.0 + +{{< release-date date="2026-01-19" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.57.0" build_path="/215387/" >}} + +### Security + +- Fixed [CVE-2025-14740](https://www.cve.org/cverecord?id=CVE-2025-14740) where the Docker Desktop for Windows installer contained multiple incorrect permission assignment vulnerabilities in the handling of the `C:\ProgramData\DockerDesktop` directory. + +### New + +- Docker Desktop now has a new issue tracker for all platforms at https://github.com/docker/desktop-feedback. Relevant, actively discussed issues from the previous platform-specific trackers will be migrated. + +### Updates + +- [Docker Compose v5.0.1](https://github.com/docker/compose/releases/tag/v5.0.1) + +### Bug fixes and enhancements + +#### For all platforms + +- Improved alignment of the Ask Gordon streaming indicator so it stays in sync with content on large screens. +- Fixed a bug where `docker debug` failed on containers started with environment variables but no '='. For example, `docker run -e NONEXISTENT_ENV_VAR`. + +## 4.56.0 + +{{< release-date date="2026-01-12" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.56.0" build_path="/214940/" >}} + +### New + +- Docker Desktop now includes Docker Compose v5 which introduces a new official Go SDK. This SDK provides a comprehensive API that lets you integrate Compose functionality directly into your applications, allowing you to load, validate, and manage multi-container environments without relying on the Compose CLI. For more information, see the [Compose SDK docs](/manuals/compose/compose-sdk.md). + +### Updates + +- [containerd v2.2.1](https://github.com/containerd/containerd/releases/tag/v2.2.1) +- [Docker Compose v5.0.0](https://github.com/docker/compose/releases/tag/v5.0.0) +- [Docker Agent v1.18.6](https://github.com/docker/docker-agent/releases/tag/v1.18.6) + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed a panic in filesharing tests when containers don't have an IP address immediately after starting. +- Added support for custom DNS entries in the LinuxKit VM with the `ExtraDNSEntries` configuration field. + +#### For Windows + +- Fixed a bug on Windows where removing the state directory would fail because log files were still open. +- Fixed installations from the Microsoft Store wrongly advertizing a new update. +- Fixed a crash when running `/sbin/ldconfig` in `ubuntu:22.04` ARM64 containers by upgrading QEMU from 8.1.5 to 10.0.4. 
This resolves a known issue reported in [docker/for-win#15004](https://github.com/docker/for-win/issues/15004). + + > [!NOTE] + > + > When running under ARM64 emulation, some `amd64` Go binaries built with older Go versions may still segfault. To avoid this, rebuild affected binaries using Go 1.25.4 or later. For details, see [golang/go#69255](https://github.com/golang/go/issues/69255) and the corresponding [Go commit](https://github.com/golang/go/commit/bf95b767394eb5643265f44c7b98bdbb85b897ce). + +#### For Linux + +- Fixed Kubernetes `hostPath` volume mounts failing on Linux hosts. Fixes [docker/desktop-linux#12](https://github.com/docker/desktop-linux/issues/12). + +## 4.55.0 + +{{< release-date date="2025-12-16" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.55.0" build_path="/213807/" >}} + +### Updates + +- [Docker Engine v29.1.3](https://docs.docker.com/engine/release-notes/29/#2913) +- [Docker Agent v1.15.1](https://github.com/docker/docker-agent/releases/tag/v1.15.1) + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed an issue that caused Docker Desktop to get stuck during startup. +- Improved the error message when the `daemon.json` is invalid. +- Fixed performance issues on every keystroke within a long Ask Gordon session. +- Fixed an issue that prevented Kubernetes in kubeadm mode from starting up when an organization has configured Registry Access Management to block Docker Hub. + +> [!IMPORTANT] +> +> Wasm workloads will be deprecated and removed in a future Docker Desktop release. + +## 4.54.0 + +{{< release-date date="2025-12-04" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.54.0" build_path="/212467/" >}} + +### New + +- Added support for vLLM in Docker Model Runner on Windows with WSL2 and NVIDIA GPUs. + +### Bug fixes and enhancements + +#### For Mac + +- Fixed a bug where `/dev/shm` did not have enough permission for containers to write into. Fixes [docker/for-mac#7804](https://github.com/docker/for-mac/issues/7804). + +### Upgrades + +- [Docker Buildx v0.30.1](https://github.com/docker/buildx/releases/tag/v0.30.1) +- [Docker Engine v29.1.2](https://docs.docker.com/engine/release-notes/29/#2912) +- [Runc v1.3.4](https://github.com/opencontainers/runc/releases/tag/v1.3.4) +- [Docker Model Runner CLI v1.0.2](https://github.com/docker/model-runner/releases/tag/cmd%2Fcli%2Fv1.0.2) + +### Security + +- Added a security patch to address [CVE-2025-13743](https://www.cve.org/cverecord?id=CVE-2025-13743) where Docker Desktop diagnostics bundles were found to include expired Hub PATs in log output due to error object serialization. + +## 4.53.0 + +{{< release-date date="2025-11-27" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.53.0" build_path="/211793/" >}} + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed an issue where the Support Diagnostics tooling inadvertently captured expired Docker Hub authorization bearer tokens. + +### Security + +- Added security patches to address CVEs [2025-52565](https://github.com/opencontainers/runc/security/advisories/GHSA-9493-h29p-rfm2), [2025-52881](https://github.com/opencontainers/runc/security/advisories/GHSA-cgrx-mc8f-2prm), and [2025-31133](https://github.com/opencontainers/runc/security/advisories/GHSA-qw9x-cqr3-wc7r) when using [Enhanced Container Isolation](https://docs.docker.com/enterprise/security/hardened-desktop/enhanced-container-isolation). 
+ +## 4.52.0 + +{{< release-date date="2025-11-20" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.52.0" build_path="/210994/" >}} + +### New + +- Added new port binding settings to Docker Desktop. This can also be controlled by administrators via Settings Management using the `admin-settings.json` file. +- Added a new Docker Model Runner command. With `docker model purge` you can remove all your models. + +### Upgrades + +- [Docker Engine v29.0.0](/manuals/engine/release-notes/29.md#2900) +- [Docker Model Runner v1.0.3](https://github.com/docker/model-runner/releases/tag/v1.0.3) +- [Docker Model Runner CLI v1.0.0](https://github.com/docker/model-runner/releases/tag/cmd%2Fcli%2Fv1.0.0) +- Docker MCP plugin `v0.28.0` + +### Bug fixes and enhancements + +#### For all platforms + +- Docker MCP Toolkit improvements: + - Amazon Q client support + - OAuth DCR (Dynamic Client Registration) with Docker Engine + - Create MCP profiles using the CLI +- Docker Model Runner improvements: + - You can now skip the `/engines` prefix for [Docker Model Runner's OpenAI API endpoint](/manuals/ai/model-runner/api-reference.md#rest-api-examples) `curl http://localhost:12434/v1/models`. + - You can now skip the `ai/` prefix for the models [published on Docker Hub with](https://hub.docker.com/u/ai) `docker model pull`. + - Downloads are now resumed when they get interrupted. + +#### For Windows + +- Fixed an issue with Kerberos/NTLM proxy sign in. + +## 4.51.0 + +{{< release-date date="2025-11-13" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.51.0" build_path="/210443/" >}} + +### New + +- You can now set up your Kubernetes resources from the **Kubernetes** view. This new view also provides a real-time display of your pods, services, and deployments. + +### Upgrades + +- [Docker Engine v28.5.2](/manuals/engine/release-notes/28.md#2852) +- Linux kernel `v6.12.54` + +### Bug fixes and enhancements + +#### For all platforms + +- Kind now only pulls required dependency images if they are not available locally. + +## 4.50.0 + +{{< release-date date="2025-11-06" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.50.0" build_path="/209931/" >}} + +### New + +- [Dynamic MCP](/manuals/ai/mcp-catalog-and-toolkit/dynamic-mcp.md)(Experimental) is now available in Docker Desktop . +- Introduced a new Welcome Survey to improve onboarding. New users can now provide information to help tailor their Docker Desktop experience. + +### Upgrades + +- [Docker Compose v2.40.3](https://github.com/docker/compose/releases/tag/v2.40.3) +- [NVIDIA Container Toolkit v1.18.0](https://github.com/NVIDIA/nvidia-container-toolkit/releases/tag/v1.18.0) + +### Bug fixes and enhancements + +#### For all platforms + +- Docker Desktop now detects and attempts to avoid clashes between the "Docker subnet" and physical networks using RFC1918 addresses. For example if the host has a non-default route which overlaps with `192.168.65.0/24` then an alternative network will be chosen automatically. You can still override the choice as before via Docker Desktop settings and admin settings. +- Docker Desktop no longer treats Stargz Snapshotter failures as fatal. If a failure occurs, Docker Desktop continues to run without the Stargz Snapshotter. +- Ask Gordon no longer displays images with user provided URLs. +- Ask Gordon now asks for confirmation before running all built-in and all user added MCP tools. 
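  For the subnet-clash detection described above, a quick way to check whether your host already has a route that overlaps Docker Desktop's default `192.168.65.0/24` network is sketched below. This assumes a Linux or WSL 2 shell; note that a broader route such as `192.168.0.0/16` would also overlap without matching this simple filter.

  ```bash
  # Look for host routes that collide with Docker Desktop's default subnet.
  # Wider prefixes (for example 192.168.0.0/16) also overlap but will not match
  # this narrow grep, so review the full routing table if in doubt.
  ip route | grep '192.168.65.' || echo "No route matching 192.168.65.x found"
  ```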
+

## 4.49.0

{{< release-date date="2025-10-23" >}}

{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.49.0" build_path="/208700/" >}}

> [!IMPORTANT]
>
> Support for Windows 10 21H2 (19044) and 11 22H2 (22621) has ended. Installing Docker Desktop will require Windows 10 22H2 (19045) or Windows 11 23H2 (22631) in the next release.

### Security

- Fixed [CVE-2025-9164](https://www.cve.org/cverecord?id=CVE-2025-9164) where the Docker Desktop for Windows installer was vulnerable to DLL hijacking due to insecure DLL search order. The installer searches for required DLLs in the user's Downloads folder before checking system directories, allowing local privilege escalation through malicious DLL placement.

### New

- [Docker Agent](/manuals/ai/docker-agent/_index.md) is now available through Docker Desktop.
- [Docker Debug](/reference/cli/docker/debug/) is now free for all users.

### Upgrades

- [Docker Engine v28.5.1](/manuals/engine/release-notes/28.md#2851)
- [Docker Compose v2.40.2](https://github.com/docker/compose/releases/tag/v2.40.2)
- [NVIDIA Container Toolkit v1.17.9](https://github.com/NVIDIA/nvidia-container-toolkit/releases/tag/v1.17.9)
- Docker Debug `v0.0.45`

### Bug fixes and enhancements

#### For all platforms

- Fixed an issue where Docker Desktop used an expired proxy password while waiting for the user to enter a new one.
- Fixed a 'chown' error shown on startup with Docker Debug.
- Fixed a bug that caused some forwarded UDP ports to hang.

#### For Mac

- Fixed Kubernetes startup hanging when another Kubernetes context was active. Fixes [docker/for-mac#7771](https://github.com/docker/for-mac/issues/7771).
- If a Rosetta install is cancelled or fails, Rosetta will be disabled in Docker Desktop.
- The minimum OS version to install or update Docker Desktop on macOS is now macOS Sonoma (version 14) or later.

## 4.48.0

{{< release-date date="2025-10-09" >}}

{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.48.0" build_path="/207573/" >}}

> [!IMPORTANT]
>
> Support for macOS 13 has ended. Installing Docker Desktop will require macOS 14 in the next release.

### New

- You can now specify PAC files and Embedded PAC scripts with installer flags for [macOS](/manuals/desktop/setup/install/mac-install.md#proxy-configuration) and [Windows](/manuals/desktop/setup/install/windows-install.md#proxy-configuration).
- Administrators can set proxy settings via [macOS configuration profiles](/manuals/enterprise/security/enforce-sign-in/methods.md#macos-configuration-profiles-method-recommended).

### Upgrades

- [Docker Compose v2.40.0](https://github.com/docker/compose/releases/tag/v2.40.0)
- [Docker Buildx v0.29.1](https://github.com/docker/buildx/releases/tag/v0.29.1)
- [Docker Engine v28.5.1](https://docs.docker.com/engine/release-notes/28/#2851)
- Docker MCP plugin `v0.22.0`
- [Docker Model CLI v0.1.42](https://github.com/docker/model-cli/releases/tag/v0.1.42)

### Bug fixes and enhancements

#### For all platforms

- Fixed an issue where kind cluster state was sometimes reset when Docker Desktop restarted. Fixes [docker/for-mac#7745](https://github.com/docker/for-mac/issues/7745).
- Removed the obsolete `mcp` key to align with the latest VS Code MCP server changes.
+- Update credential helpers to [v0.9.4](https://github.com/docker/docker-credential-helpers/releases/tag/v0.9.4). +- Fixed an issue where Docker Desktop used an expired proxy password while waiting for the user to enter a new one. +- Fixed a bug which caused Docker Desktop to regularly create new processes with Docker CLI tools under certain conditions. Fixes [docker/for-win#14944](https://github.com/docker/for-win/issues/14944). +- Fixed a bug which caused models to not be configured for embeddings with Docker Model Runner via Compose. To specify that a model should be configured for embeddings, you must explicitly add the `--embeddings` runtime flag as described in [AI Models in Docker Compose](https://docs.docker.com/ai/compose/models-and-compose/#model-configuration-options). Fixes [docker/model-runner#166](https://github.com/docker/model-runner/issues/166). + +#### For Windows + +- Removed the `HKLM\SOFTWARE\Docker Inc.\Docker\1.0` registry key. Look for `docker.exe` in the path to find out where Docker Desktop is installed instead. +- Fixed startup in WSL 2 mode when IPv6 has been disabled. + +## 4.47.0 + +{{< release-date date="2025-09-25" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.47.0" build_path="/206054/" >}} + +### Security + +- Fixed [CVE-2025-10657](https://www.cve.org/CVERecord?id=CVE-2025-10657) where the Enhanced Container Isolation [Docker Socket command restrictions](../enterprise/security/hardened-desktop/enhanced-container-isolation/config.md#command-restrictions) feature was not working properly in Docker Desktop 4.46.0 only (the configuration for it was being ignored). + +### New + +- Added dynamic MCP server discovery and support to Docker's MCP catalog. +- With Enhanced Container Isolation, administrators can now block `docker plugin` and `docker login` commands in containers with Docker socket mounts. +- Added a new Docker Model Runner command. With `docker model requests` you can fetch requests and responses. + +### Upgrades + +- [Docker Compose v2.39.4](https://github.com/docker/compose/releases/tag/v2.39.4) +- [Kubernetes v1.34.1](https://github.com/kubernetes/kubernetes/releases/tag/v1.34.1) + - [CNI plugins v1.7.1](https://github.com/containernetworking/plugins/releases/tag/v1.7.1) + - [cri-tools v1.33.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.33.0) + - [cri-dockerd v0.3.20](https://github.com/Mirantis/cri-dockerd/releases/tag/v0.3.20) +- Docker Debug `v0.0.44` + +### Bug fixes and enhancements + +#### For all platforms + +- You can now search for MCP servers more easily with filters, sorting, and improved search functionality. +- Docker Debug no longer hangs when debugging containers that have environment variables set to an empty value. +- Enhanced Docker Model Runner with rich response rendering in the CLI, conversational context in the Docker Desktop Dashboard, and resumable downloads. + +#### For Mac + +- Removed the `com.apple.security.cs.allow-dyld-environment-variables` entitlement which allow a signed, arbitrary dynamic library to be loaded with Docker Desktop via the `DYLD_INSERT_LIBRARIES` environment variable. +- Fixed a regression where config profile sign-in enforcement broke for some customer environments. +- Fixed a bug that sometimes caused the `docker model package` command to hang when writing to the local content store (without the `--push` flag). +- Fixed a bug where containers started with the restart policy `unless-stopped` were never restarted. 
Fixes [docker/for-mac#7744](https://github.com/docker/for-mac/issues/7744). + +#### For Windows + +- Fixed the Goose MCP client connection on Windows for the Docker MCP Toolkit. +- Addressed an issue with the "Skipping integration" of a WSL distro option, after a failed integration attempt. +- Fixed a bug that sometimes caused the `docker model package` command to hang when writing to the local content store (without the `--push` flag). + +## 4.46.0 + +{{< release-date date="2025-09-11" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.46.0" build_path="/204649/" >}} + +### New + +- Added a new Learning center walkthrough for Docker MCP Toolkit and other onboarding improvements. +- Administrators can now control [PAC configurations with Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md#proxy-settings). +- The update experience has been redesigned to make it easier to understand and manage updates for Docker Desktop and its components. + +### Upgrades + +- [Docker Buildx v0.28.0](https://github.com/docker/buildx/releases/tag/v0.28.0) +- [Docker Engine v28.4.0](https://docs.docker.com/engine/release-notes/28/#2840) + +### Bug fixes and enhancements + +#### For all platforms + +- With the Docker CLI, you can now set the `GODEBUG` environment variable when the key-value pair (`"GODEBUG":"..."`) exists inside the Docker context metadata. This means certificates that have negative serial numbers in the CLI binaries are supported by default. +- Updated the Docker Subscription Service Agreement link to point to the latest version. + +#### For Mac + +- Improved the security of Docker Model Runner by enabling sandboxing of the `llama.cpp` inference processes. +- Fixed a bug which caused Docker Desktop to start slowly and appear frozen. Fixes [docker/for-mac#7671](https://github.com/docker/for-mac/issues/7671). + +#### For Windows + +- Improved the security of Docker Model Runner by enabling sandboxing of the `llama.cpp` inference processes. + +#### For Linux + +- Fixed a path issue in the RHEL post-uninstall sequence. + +## 4.45.0 + +{{< release-date date="2025-08-28" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.45.0" build_path="/203075/" >}} + +### New + +- [Docker Model Runner](/manuals/ai/model-runner/_index.md) is now generally available. + +### Upgrades + +- [Docker Compose v2.39.2](https://github.com/docker/compose/releases/tag/v2.39.2) +- [Docker Buildx v0.27.0](https://github.com/docker/buildx/releases/tag/v0.27.0) +- [Docker Scout CLI v1.18.3](https://github.com/docker/scout-cli/releases/tag/v1.18.3) +- [Docker Engine v28.3.3](https://docs.docker.com/engine/release-notes/28/#2833) + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed a bug that caused the `com.docker.diagnose` to crash when uploading a diagnostics bundle behind a proxy that requires authentication. +- The `kind` dependency image `envoyproxy/envoy` was upgraded from v1.32.0 to v1.32.6. If you mirror `kind` images, ensure your mirrors are updated. + +#### For Mac + +- Fixed a bug that caused Docker Desktop to crash after the laptop woke from sleep. Fixes [docker/for-mac#7741](https://github.com/docker/for-mac/issues/7741). +- Fixed an issue where the VM would sometimes fail with the error **The virtual machine stopped unexpectedly.** +- Fixed a bug that would break port mappings when a container was connected to or disconnected from a network after it was started. 
Fixes [docker/for-mac#7693](https://github.com/docker/for-mac/issues/7693). + +#### For Windows + +- Fixed a bug that prevented CLI plugins from being deployed to `~/.docker/cli-plugins` by default when users lacked the correct permissions. +- Fixed a bug where relocating the WSL data distribution would fail if the `docker-desktop` distribution was not present. +- Fixed a typo in the WSL install URL in the Docker Desktop Dashboard. +- Fixed an issue where certain WSL distros would fail to integrate. Fixes [docker/for-win#14686](https://github.com/docker/for-win/issues/14686) + +## 4.44.3 + +{{< release-date date="2025-08-20" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.44.3" build_path="/202357/" >}} + +### Security + +- Fixed [CVE-2025-9074](https://www.cve.org/CVERecord?id=CVE-2025-9074) where a malicious container running on Docker Desktop could access the Docker Engine and launch additional containers without requiring the Docker socket to be mounted. This could allow unauthorized access to user files on the host system. Enhanced Container Isolation (ECI) does not mitigate this vulnerability. + +### Bug fixes and enhancements + +- Fixed a bug which caused the Docker Offload dialog to block users from accessing the dashboard. + +## 4.44.2 + +{{< release-date date="2025-08-15" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.44.2" build_path="/202017/" >}} + +### Bug fixes and enhancements + + - Adds [Docker Offload](/manuals/offload/_index.md) to the **Beta features** settings tab and includes updates to support [Docker Offload Beta](https://www.docker.com/products/docker-offload/). + +## 4.44.1 + +{{< release-date date="2025-08-13" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.44.1" build_path="/201842/" >}} + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed an issue found in version 4.44.0 that caused startup to fail when `vpnkit` CIDR is locked without specifying a value in Desktop Settings Management. + +#### For Windows + +- Fixed an issue where volumes and containers were not visible after an upgrade from distributions using the legacy `version-pack-data` directory structure. +- Resolved a rare issue in WSL 2 where the Docker CLI failed with a **Proxy Authentication Required** error. +- Fixed a bug where CLI plugins were not deployed to `~/.docker/cli-plugins` if the user lacked execution permissions on that directory. + +## 4.44.0 + +{{< release-date date="2025-08-07" >}} + +{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.44.0" build_path="/201307/" >}} + +### New + +- WSL 2 stability improvements. +- You can now inspect requests and responses to help you diagnose model-related issues in Docker Model Runner. +- Added the ability to run multiple models and receive a warning on insufficient resources. This avoids Docker Desktop freezing when using big models. +- Added new MCP clients to the MCP Toolkit: Gemini CLI, Goose. +- Introduced `--gpu` (Windows only) and `--cors` flags for `docker desktop enable model-runner`. +- Added a new `docker desktop kubernetes` command to the Docker Desktop CLI. +- You can now search for specific configuration options within **Settings**. +- Apple Virtualization is now the default VMM for better performance and QEMU Virtualization is removed. See [blog post](https://www.docker.com/blog/docker-desktop-for-mac-qemu-virtualization-option-to-be-deprecated-in-90-days/). 
+- Performance and stability improvements to the DockerVMM. + +### Upgrades + +- [Docker Compose v2.39.1](https://github.com/docker/compose/releases/tag/v2.39.1) +- [Docker Buildx v0.26.1](https://github.com/docker/buildx/releases/tag/v0.26.1) +- [Docker Engine v28.3.2](https://docs.docker.com/engine/release-notes/28/#2832) +- [Docker Scout CLI v1.18.2](https://github.com/docker/scout-cli/releases/tag/v1.18.2) +- [Docker Model CLI v0.1.36](https://github.com/docker/model-cli/releases/tag/v0.1.36) +- [Docker Desktop CLI v0.2.0](/manuals/desktop/features/desktop-cli.md) + +### Security + +We are aware of [CVE-2025-23266](https://nvd.nist.gov/vuln/detail/CVE-2025-23266), a critical vulnerability affecting the NVIDIA Container Toolkit in CDI mode up to version 1.17.7. Docker Desktop includes version 1.17.8, which is not impacted. However, older versions of Docker Desktop that bundled earlier toolkit versions may be affected if CDI mode was manually enabled. Upgrade to Docker Desktop 4.44 or later to ensure you're using the patched version. + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed an issue pulling images with zstd differential layers when the containerd image store is enabled. +- Fixed a bug causing containers launching with the `--restart` flag to not restart properly when using Enhanced Container Isolation. +- Improved interaction between [Kubernetes custom registry images](/manuals/desktop/use-desktop/kubernetes.md#configuring-a-custom-image-registry-for-kubernetes-control-plane-images) and Enhanced Container Isolation (ECI), so the [ECI Docker Socket image list](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md) no longer needs to be manually updated when using a custom registry for Kubernetes control plane images. +- Fixed a bug where a Docker Desktop Kubernetes cluster in kind mode fails to start after restarting Docker Desktop if the user is required to be signed in but is currently signed out. +- Fixed a bug that prevented the mounting of MCP secrets into containers when [Enhanced Container Isolation](/enterprise/security/hardened-desktop/enhanced-container-isolation/) is enabled. +- Fixed a bug preventing the use of `--publish-all` when `--publish` was already specified. +- Fixed a bug causing the **Images** view to scroll infinitely. Fixes [docker/for-mac#7725](https://github.com/docker/for-mac/issues/7725). +- Fixed a bug which caused the **Volumes** tab to be blank while in Resource Saver mode. +- Updated terms of service text on first launch. +- More robustness in parsing newly released GGUF formats. + +#### For Mac + +- Fixed disk corruption on DockerVMM when reclaiming disk space. +- Fixed regression since 4.42.0 on DockerVMM by re-introducing performance boost on general usage. +- Removed QEMU hypervisor and switched to Apple Virtualization as the new default. See [blog post](https://www.docker.com/blog/docker-desktop-for-mac-qemu-virtualization-option-to-be-deprecated-in-90-days/). +- Fixed a bug preventing Traefik from autodetecting containers' ports. Fixes [docker/for-mac#7693](https://github.com/docker/for-mac/issues/7693). +- Fixed a bug that caused port mappings to break when a container was connected to or disconnected from a network after it was started. Fixes [docker/for-mac#7693](https://github.com/docker/for-mac/issues/7693#issuecomment-3131427879). +- Removed eBPF which blocked `io_uring`. To enable `io_uring` in a container, use `--security-opt seccomp=unconfined`. 
Fixes [docker/for-mac#7707](https://github.com/docker/for-mac/issues/7707).
- Docker Model Runner now supports GPT OSS models.

#### For Windows

- Re-added `docker-users` group to the named pipe security descriptors.
- Fixed an installer crash when the current user has no `SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall` registry key.
- Fixed a bug where Docker Desktop could leak a `com.docker.build` process and fail to start. Fixes [docker/for-win#14840](https://github.com/docker/for-win/issues/14840).
- Fixed a bug that was preventing Docker Desktop Kubernetes in kind mode from starting when using WSL with `cgroups v1` and Enhanced Container Isolation (ECI) enabled.
- Fixed a typo in the WSL installation URL in the UI.
- Docker Model Runner now supports GPT OSS models.

## 4.43.2

{{< release-date date="2025-07-15" >}}

{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.43.2" build_path="/199162/" >}}

### Upgrades

- [Docker Compose v2.38.2](https://github.com/docker/compose/releases/tag/v2.38.2)
- [Docker Engine v28.3.2](https://docs.docker.com/engine/release-notes/28/#2832)
- Docker Model CLI v0.1.33

## 4.43.1

{{< release-date date="2025-07-04" >}}

{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.43.1" build_path="/198352/" >}}

### Bug fixes and enhancements

#### For all platforms

- Fixed an issue that caused the Docker Desktop UI to break when Ask Gordon responses contained HTML tags.
- Fixed an issue that prevented extensions from communicating with their backends.

## 4.43.0

{{< release-date date="2025-07-03" >}}

{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.43.0" build_path="/198134/" >}}

### New

- [Compose Bridge](/manuals/compose/bridge/_index.md) is now generally available.

### Upgrades

- [Docker Buildx v0.25.0](https://github.com/docker/buildx/releases/tag/v0.25.0)
- [Docker Compose v2.38.1](https://github.com/docker/compose/releases/tag/v2.38.1)
- [Docker Engine v28.3.0](https://docs.docker.com/engine/release-notes/28/#2830)
- [NVIDIA Container Toolkit v1.17.8](https://github.com/NVIDIA/nvidia-container-toolkit/releases/tag/v1.17.8)

### Security

- Fixed [CVE-2025-6587](https://www.cve.org/CVERecord?id=CVE-2025-6587) where sensitive system environment variables were included in Docker Desktop diagnostic logs, allowing for potential secret exposure.

### Bug fixes and enhancements

#### For all platforms

- Fixed a bug causing `docker start` to drop the port mappings of a container that was already running.
- Fixed a bug that prevented container ports from being displayed in the GUI when a container was restarted.
- Fixed a bug that caused a Docker API `500 Internal Server Error for API route and version` error on application start.
- The settings **Apply & restart** button is now labeled **Apply**. The VM is no longer restarted when applying changed settings.
- Fixed a bug where the disk would be corrupted if Docker was shut down during an `fsck`.
- Fixed a bug causing an incorrect `~/.kube/config` in WSL 2 when using a `kind` Kubernetes cluster.
- Docker Desktop now returns an explicit error to Docker API and `docker` CLI commands if it has been manually paused.
- Fixed an issue where unknown keys in Admin and Cloud settings caused a failure.

#### For Mac

- Removed `eBPF` which blocked `io_uring`. To enable `io_uring` in a container, use `--security-opt seccomp=unconfined`.
Fixes [docker/for-mac#7707](https://github.com/docker/for-mac/issues/7707). + +#### For Windows + +- Fixed an issue that caused the Docker Desktop installer to crash when the current user has no `SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall` registry key. +- Fixed a bug where Docker Desktop could leak a `com.docker.build` process and fail to start. Fixes [docker/for-win#14840](https://github.com/docker/for-win/issues/14840) + +### Known issues + +#### For all platforms + +- `docker buildx bake` will not build images in Compose files with a top-level models attribute. Use `docker compose build` instead. +- Gordon responses containing HTML can cause Desktop UI to be permanently broken. As a workaround, you can delete `persisted-state.json` file to reset the UI. The file is located in the following directories: + - Windows: `%APPDATA%\Docker Desktop\persisted-state.json` + - Linux: `$XDG_CONFIG_HOME/Docker Desktop/persisted-state.json` or `~/.config/Docker Desktop/persisted-state.json` + - Mac: `~/Library/Application Support/Docker Desktop/persisted-state.json` + +#### For Windows + +- Possible incompatibility between the "host networking" feature of Docker Desktop and the most recent WSL 2 Linux kernel. If you encounter such issues, downgrade WSL 2 to 2.5.7. + +## 4.42.1 + +{{< release-date date="2025-06-18" >}} + +### Upgrades + +- [Docker Compose v2.37.1](https://github.com/docker/compose/releases/tag/v2.37.1) + +### Bug fixes and enhancements + +#### For all platforms + +- Fixed an issue where Docker domains were not reachable when the proxy configuration is not valid. +- Fixed a possible deadlock when exposing ports. +- Fixed a race condition which can cause `docker run -p` ports to disappear. + +#### For Mac + +- Fixed a bug where a container’s port list appeared empty when inspected immediately after it was created, for example, when using a script. [docker/for-mac#7693](https://github.com/docker/for-mac/issues/7693) + +#### For Windows + +- Disabled the Resource Saver mode in WSL 2 to prevent `docker` CLI commands hanging in WSL 2 distros. [docker/for-win#14656](https://github.com/docker/for-win/issues/14656#issuecomment-2960285463) + +## 4.42.0 + +{{< release-date date="2025-06-04" >}} + +### New + +- Expanded network compatibility with IPv6 support. +- The Docker MCP Toolkit is now natively integrated into Docker Desktop. +- Docker Model Runner is now available for Windows systems running on Qualcomm/ARM GPUs. +- Added a **Logs** tab to the Models view so you can see the inference engine output in real time. +- Gordon now integrates the MCP Toolkit, providing access to 100+ MCP servers. + +### Upgrades + +- [Docker Buildx v0.24.0](https://github.com/docker/buildx/releases/tag/v0.24.0) +- [Docker Engine v28.2.2](https://docs.docker.com/engine/release-notes/28/#2822) +- [Compose Bridge v0.0.20](https://github.com/docker/compose-bridge-binaries/releases/tag/v0.0.20) +- [Docker Compose v2.36.2](https://github.com/docker/compose/releases/tag/v2.36.2) +- [NVIDIA Container Toolkit v1.17.7](https://github.com/NVIDIA/nvidia-container-toolkit/releases/tag/v1.17.7) +- [Docker Scout CLI v1.18.0](https://github.com/docker/scout-cli/releases/tag/v1.18.0) + +### Bug fixes and enhancements + +#### For all platforms + +- Docker Desktop now accepts certificates with a negative serial number. +- Re-enable `seccomp` for containers by default. Use `docker run --security-opt seccomp=unconfined` to disable seccomp for a container. 
+
- Fixed a bug that caused Docker Desktop to hang when it ran out of memory.
- Blocked `io_uring` syscalls in containers.
- Added support for pulling models from Docker Hub directly, simplifying the process of accessing and using models.
- Docker Desktop now sets the disk usage limit to the size of the physical disk on fresh installs and when resetting to defaults on Mac and Linux.
- The maximum disk size in the settings UI now aligns with the full capacity of the host file system.
- The **Models** view now has a **Docker Hub** tab that lists models under the `ai` namespace.
- Improved the sign-in enforcement message when more than 10 organizations are enforced.
- Changed the way ports are mapped by Docker Desktop to fully support IPv6 ports.
- Fixed a bug in the Dashboard container logs screen causing the scrollbar to disappear as the mouse approached it.
- Fixed [enforced sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) for Teams subscription users.
- The `llama.cpp` server now supports streaming and tool calling in Model Runner.
- The sign-in enforcement capability is now available to all subscriptions.

#### For Mac

- Fixed a bug where the disk would always have a minimum usage limit of 64GB when using Docker VMM.
- Disabled the memory protection keys mechanism in the Docker Desktop Linux VM because it caused VS Code Dev Containers to not work properly. See [docker/for-mac#7667](https://github.com/docker/for-mac/issues/7667).
- Fixed persistent volume claims under Kubernetes. Fixes [docker/for-mac#7625](https://github.com/docker/for-mac/issues/7625).
- Fixed a bug where the VM failed to start using Apple virtualization.framework.
- The minimum macOS version to install or update Docker Desktop is now macOS Ventura 13.3.

#### For Windows

- Fixed a bug in Enhanced Container Isolation on Windows WSL, where files with hardlinks inside containers had `nobody:nogroup` ownership.
- Fixed a bug that caused Docker Desktop to crash. Related to [docker/for-win#14782](https://github.com/docker/for-win/issues/14782).
- Fixed a bug that caused a `The network name cannot be found` error when starting with WSL 2. Fixes [docker/for-win#14714](https://github.com/docker/for-win/issues/14714).
- Fixed an issue where Docker Desktop would not remove entries in the hosts file when uninstalling.
- Fixed an issue when reading the auto-start registry key for some system languages. Fixes [docker/for-win#14731](https://github.com/docker/for-win/issues/14731).
- Fixed a bug where Docker Desktop was adding an unrecognised `crossDistro` option to `/etc/wsl.conf`, which caused WSL 2 to log an error. See [microsoft/WSL#4577](https://github.com/microsoft/WSL/issues/4577).
- Fixed a bug where Docker Desktop failed to start on WSL 2.5.7 if another WSL distro was still using Linux cgroups v1. Fixes [docker/for-win#14801](https://github.com/docker/for-win/issues/14801).
- Windows Subsystem for Linux (WSL) version 2.1.5 is now the minimum version required for the Docker Desktop application to function properly.

### Known issues

#### For all platforms

- This release contains a regression with `docker port`, resulting in "No host port found for host IP" errors when using testcontainers-node. See [testcontainers/testcontainers-node#818](https://github.com/testcontainers/testcontainers-node/issues/818#issuecomment-2941575369).

#### For Windows

- Running containers with Wasm will hang sporadically. See [docker/for-mac#7666](https://github.com/docker/for-mac/issues/7666).
+- On some machines Resource Saver will cause other WSL 2 distros to freeze. The workaround is to disable Resource Saver. See [docker/for-win#14656](https://github.com/docker/for-win/issues/14656).

## 4.41.2

{{< release-date date="2025-05-06" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.41.2" build_path="/191736/" >}}
-
### Bug fixes and enhancements

#### For all platforms

@@ -45,8 +1357,6 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo

{{< release-date date="2025-04-30" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.41.1" build_path="/191279/" >}}
-
### Bug fixes and enhancements

#### For all platforms

@@ -61,14 +1371,12 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo

{{< release-date date="2025-04-28" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.41.0" build_path="/190950/" >}}
-
### New

- Docker Model Runner is now available on x86 Windows machines with NVIDIA GPUs.
- You can now [push models](/manuals/ai/model-runner.md#push-a-model-to-docker-hub) to Docker Hub with Docker Model Runner.
- Added support for Docker Model Runner's model management and chat interface in Docker Desktop for Mac and Windows (on hardware supporting Docker Model Runner). Users can now view, interact with, and manage local AI models through a new dedicated interface.
-- [Docker Compose](/manuals/compose/how-tos/model-runner.md) and Testcontainers [Java](https://java.testcontainers.org/modules/docker_model_runner/) and [Go](https://golang.testcontainers.org/modules/dockermodelrunner/) now support Docker Model Runner.
+- [Docker Compose](/manuals/ai/compose/models-and-compose.md) and Testcontainers [Java](https://java.testcontainers.org/modules/docker_model_runner/) and [Go](https://golang.testcontainers.org/modules/dockermodelrunner/) now support Docker Model Runner.
- Introducing Docker Desktop in the [Microsoft App Store](https://apps.microsoft.com/detail/xp8cbj40xlbwkx?hl=en-GB&gl=GB).

### Upgrades

@@ -96,7 +1404,7 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo
- Improved error messages when downloading Registry Access Management configuration.
- If Docker can't bind an ICMPv4 socket, it now logs an error and continues rather than quits.
- Enabled the memory protection keys mechanism in the Docker Desktop Linux VM, allowing containers like Oracle database images to run correctly.
-- Fixed a problem with containers accessing `/proc/sys/kernel/shm*` sysctls when [Enhanced Container Isolation](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md) is enabled on Mac, Windows Hyper-V, or Linux.
+- Fixed a problem with containers accessing `/proc/sys/kernel/shm*` sysctls when [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) is enabled on Mac, Windows Hyper-V, or Linux.
- Added kernel module `nft_fib_inet`, required for running firewalld in a Linux container.
- MacOS QEMU Virtualization option is being deprecated on July 14, 2025.

@@ -125,8 +1433,6 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo

{{< release-date date="2025-03-31" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.40.0" build_path="/187762/" >}}
-
### New

- You can now pull, run, and manage AI models from Docker Hub directly in Docker Desktop with [Docker Model Runner (Beta)](/manuals/ai/model-runner.md).
  Currently available for Docker Desktop for Mac with Apple Silicon.

@@ -174,12 +1480,10 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo

{{< release-date date="2025-03-05" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.39.0" build_path="/184744/" >}}
-
### New

- The [Docker Desktop CLI](/manuals/desktop/features/desktop-cli.md) is now generally available. You can now also print logs with the new `docker desktop logs` command.
-- Docker Desktop now supports the `--platform` flag on [`docker load`](/reference/cli/docker/image/load.md) and [`docker save`](/reference/cli/docker/image/save.md). This helps you import and export a subset of multi-platform images.
+- Docker Desktop now supports the `--platform` flag on [`docker load`](/reference/cli/docker/image/load/) and [`docker save`](/reference/cli/docker/image/save/). This helps you import and export a subset of multi-platform images.

### Upgrades

@@ -230,14 +1534,12 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo

{{< release-date date="2025-01-30" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.38.0" build_path="/181591/" >}}
-
### New

- Installing Docker Desktop via the PKG installer is now generally available.
- Enforcing sign-in via configuration profiles is now generally available.
- Docker Compose, Docker Scout, the Docker CLI, and Ask Gordon can now be updated independently of Docker Desktop and without a full restart (Beta).
-- The new [`update` command](/reference/cli/docker/desktop/update.md) has been added to the Docker Desktop CLI (Mac only).
+- The new [`update` command](/reference/cli/docker/desktop/update/) has been added to the Docker Desktop CLI (Mac only).
- [Bake](/manuals//build/bake/_index.md) is now generally available, with support for entitlements and composable attributes.
- You can now create [multi-node Kubernetes clusters](/manuals/desktop/settings-and-maintenance/settings.md#kubernetes) in Docker Desktop.
- [Ask Gordon](/manuals/ai/gordon/_index.md) is more widely available. It is still in Beta.

@@ -259,7 +1561,7 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo

#### For all platforms

- Fixed a bug where access tokens generated by the `docker login` web flow could not be refreshed by Docker Desktop.
-- Fixed a bug where container creation via the Docker API using `curl` failed when [Enhanced Container Isolation](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md) was enabled.
+- Fixed a bug where container creation via the Docker API using `curl` failed when [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) was enabled.
- Fixed a bug where the RAM policy was not refreshed after the refresh period had elapsed.
- Fixed a bug in Enhanced Container Isolation when mounting the Docker socket into a container, and then creating Docker containers with bind-mounts from within that container.
- Fixed an issue that caused a discrepancy between the GUI and the CLI, the former forcing the `0.0.0.0` HostIP in port-mappings. This caused default binding IPs configured through Engine's `ip` flag, or through the bridge option `com.docker.network.bridge.host_binding_ipv4`, to not be used.

@@ -297,8 +1599,6 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo

{{< release-date date="2025-01-09" >}}

-{{< desktop-install-v2 mac=true version="4.37.2" build_path="/179585/" >}}
-
### Bug fixes and enhancements

#### For Mac

@@ -315,14 +1615,12 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo

{{< release-date date="2024-12-17" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.37.1" build_path="/178610/" >}}
-
### Bug fixes and enhancements

#### For all platforms

- Fixed an issue that caused the AI Catalog in Docker Hub to be unavailable in Docker Desktop.
-- Fixed an issue that caused Docker Desktop to panic with `index out of range [0] with length 0` when using [Enhanced Container Isolation](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md).
+- Fixed an issue that caused Docker Desktop to panic with `index out of range [0] with length 0` when using [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md).

### Known issues

@@ -334,8 +1632,6 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo

{{< release-date date="2024-12-12" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.37.0" build_path="/178034/" >}}
-
### New

- You can now perform key operations such as starting, stopping, restarting, and checking the status of Docker Desktop directly from the [command line](/manuals/desktop/features/desktop-cli.md) (Beta).

@@ -394,8 +1690,6 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo

{{< release-date date="2025-01-09" >}}

-{{< desktop-install-v2 mac=true version="4.36.1" build_path="/179655/" >}}
-
### Bug fixes and enhancements

#### For Mac

@@ -412,19 +1706,17 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo

{{< release-date date="2024-11-18" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.36.0" build_path="/175267/" >}}
-
### New

- Existing Docker Desktop installations using the WSL2 engine on Windows are now automatically migrated to a unified single-distribution architecture for enhanced consistency and performance.
- Administrators can now:
-  - Enforce sign-in with macOS [configuration profiles](/manuals/security/for-admins/enforce-sign-in/methods.md#configuration-profiles-method-mac-only) (Early Access).
+  - Enforce sign-in with macOS [configuration profiles](/manuals/enterprise/security/enforce-sign-in/methods.md#configuration-profiles-method-mac-only) (Early Access).
  - Enforce sign-in for more than one organization at a time (Early Access).
-  - Deploy Docker Desktop for Mac in bulk with the [PKG installer](/manuals/desktop/setup/install/enterprise-deployment/pkg-install-and-configure.md) (Early Access).
+  - Deploy Docker Desktop for Mac in bulk with the [PKG installer](/manuals/enterprise/enterprise-deployment/pkg-install-and-configure.md) (Early Access).
  - Use Desktop Settings Management to manage and enforce defaults via admin.docker.com (Early Access).
- Enhance Container Isolation (ECI) has been improved to:
-  - Allow admins to [turn off Docker socket mount restrictions](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md#allowing-all-containers-to-mount-the-docker-socket).
- - Support wildcard tags when using the [`allowedDerivedImages` setting](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md#docker-socket-mount-permissions-for-derived-images). + - Allow admins to [turn off Docker socket mount restrictions](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md#allowing-all-containers-to-mount-the-docker-socket). + - Support wildcard tags when using the [`allowedDerivedImages` setting](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md#docker-socket-mount-permissions-for-derived-images). ### Upgrades @@ -474,7 +1766,6 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo {{< release-date date="2025-01-09" >}} -{{< desktop-install-v2 mac=true version="4.35.2" build_path="/179656/" >}} ### Bug fixes and enhancements @@ -492,8 +1783,6 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo {{< release-date date="2024-10-30" >}} -{{< desktop-install-v2 all=true beta_win_arm=true version="4.35.1" build_path="/173168/" >}} - #### For all platforms - Fixed a bug where Docker Desktop would incorrectly bind to port `8888`. Fixes [docker/for-win#14389](https://github.com/docker/for-win/issues/14389) and [docker/for-mac#7468](https://github.com/docker/for-mac/issues/7468) @@ -502,8 +1791,6 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo {{< release-date date="2024-10-24" >}} -{{< desktop-install-v2 all=true beta_win_arm=true version="4.35.0" build_path="/172550/" >}} - ### New - Support for [Docker Desktop on Red Hat Enterprise Linux](/manuals/desktop/setup/install/linux/rhel.md) is now generally available. @@ -533,7 +1820,7 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo - Fixed a bug where the **Push to Docker Hub** action in the **Images** view would result in an `invalid tag format` error. Fixes [docker/for-win#14258](https://github.com/docker/for-win/issues/14258). - Fixed an issue where Docker Desktop startup failed when ICMPv6 setup was not successful. - Added drivers that allow USB/IP to work. -- Fixed a bug in Enhanced Container Isolation (ECI) [Docker socket mount permissions for derived images](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md) where it was incorrectly denying Docker socket mounts for some images when Docker Desktop uses the containerd image store. +- Fixed a bug in Enhanced Container Isolation (ECI) [Docker socket mount permissions for derived images](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md) where it was incorrectly denying Docker socket mounts for some images when Docker Desktop uses the containerd image store. - Enable `NFT_NUMGEN`, `NFT_FIB_IPV4` and `NFT_FIB_IPV6` kernel modules. - Build UI: - Highlight build check warnings in the **Completed builds** list. @@ -541,7 +1828,7 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo - Image tags added to **Build results** section under the **Info** tab. - Improved efficiency of host-side disk utilization for fresh installations on Mac and Linux. - Fixed a bug that prevented the Sign in enforcement popup to be triggered when token expires. -- Fixed a bug where containers would not be displayed in the GUI immediately after signing in when using [enforced sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md). 
+- Fixed a bug where containers would not be displayed in the GUI immediately after signing in when using [enforced sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md). - `settings.json` has been renamed to `settings-store.json` - The host networking feature no longer requires users to be signed-in in order to use it. @@ -578,8 +1865,6 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo {{< release-date date="2025-01-09" >}} -{{< desktop-install-v2 mac=true version="4.34.4" build_path="/179671/" >}} - ### Bug fixes and enhancements #### For Mac @@ -596,7 +1881,7 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo {{< release-date date="2024-10-09" >}} -{{< desktop-install-v2 all=true beta_win_arm=true version="4.34.3" build_path="/170107/" >}} +{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.34.3" build_path="/170107/" >}} ### Upgrades @@ -612,8 +1897,6 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo {{< release-date date="2024-09-12" >}} -{{< desktop-install-v2 all=true beta_win_arm=true version="4.34.2" build_path="/167172/" >}} - ### Bug fixes and enhancements #### For all platforms @@ -629,7 +1912,7 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo {{< release-date date="2024-09-05" >}} -{{< desktop-install-v2 win=true beta_win_arm=true version="4.34.1" build_path="/166053/" >}} +{{< desktop-install-v2 win=true win_arm_release="Beta" version="4.34.1" build_path="/166053/" >}} ### Bug fixes and enhancements @@ -641,15 +1924,13 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo {{< release-date date="2024-08-29" >}} -{{< desktop-install-v2 all=true beta_win_arm=true version="4.34.0" build_path="/165256/" >}} - ### New - [Host networking](/manuals/engine/network/drivers/host.md#docker-desktop) support on Docker Desktop is now generally available. - If you authenticate via the CLI, you can now authenticate through a browser-based flow, removing the need for manual PAT generation. - Windows now supports automatic reclamation of disk space in Docker Desktop for WSL2 installations [using a managed virtual hard disk](/manuals/desktop/features/wsl/best-practices.md). -- Deploying Docker Desktop via the [MSI installer](/manuals/desktop/setup/install/enterprise-deployment/msi-install-and-configure.md) is now generally available. -- Two new methods to [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md) (windows registry key and `.plist` file) are now generally available. +- Deploying Docker Desktop via the [MSI installer](/manuals/enterprise/enterprise-deployment/msi-install-and-configure.md) is now generally available. +- Two new methods to [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) (windows registry key and `.plist` file) are now generally available. - Fresh installations of Docker Desktop now use the containerd image store by default. - [Compose Bridge](/manuals/compose/bridge/_index.md) (Experimental) is now available from the Compose file viewer. Easily convert and deploy your Compose project to a Kubernetes cluster. @@ -696,15 +1977,13 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo > [!NOTE] > Using `docker login` with an address that includes URL path segments is not a documented use case and is considered unsupported. 
The recommended usage is to specify only a registry hostname, and optionally a port, as the address for `docker login`. - When running `docker compose up` and Docker Desktop is in the Resource Saver mode, the command is unresponsive. As a workaround, manually exit the Resource Saving mode and Docker Compose becomes responsive again. -- When [Enhanced Container Isolation (ECI)](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md) is enabled, Docker Desktop may not enter Resource Saver mode. This will be fixed in a future Docker Desktop release. -- The new [ECI Docker socket mount permissions for derived images](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md#docker-socket-mount-permissions-for-derived-images) feature does not yet work when Docker Desktop is configured with the **Use containerd for pulling and storing images**. This will be fixed in the next Docker Desktop release. +- When [Enhanced Container Isolation (ECI)](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) is enabled, Docker Desktop may not enter Resource Saver mode. This will be fixed in a future Docker Desktop release. +- The new [ECI Docker socket mount permissions for derived images](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md#docker-socket-mount-permissions-for-derived-images) feature does not yet work when Docker Desktop is configured with the **Use containerd for pulling and storing images**. This will be fixed in the next Docker Desktop release. ## 4.33.2 {{< release-date date="2025-01-09" >}} -{{< desktop-install-v2 mac=true version="4.33.2" build_path="/179689/" >}} - ### Bug fixes and enhancements #### For Mac @@ -721,8 +2000,6 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo {{< release-date date="2024-07-31" >}} -{{< desktop-install-v2 win=true beta_win_arm=true version="4.33.0" build_path="/161083/" >}} - ### Bug fixes and enhancements #### For Windows @@ -733,11 +2010,11 @@ For more frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoo {{< release-date date="2024-07-25" >}} -{{< desktop-install-v2 all=true beta_win_arm=true version="4.33.0" build_path="/160616/" >}} +{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.33.0" build_path="/160616/" >}} ### New -- [Docker Debug](/reference/cli/docker/debug.md) is now generally available. +- [Docker Debug](/reference/cli/docker/debug/) is now generally available. - BuildKit now evaluates Dockerfile rules to inform you of potential issues. - **Resource Allocation** settings can now be accessed directly from the resource usage data displayed in the Dashboard footer. - New and improved experience for [troubleshooting](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md). @@ -825,7 +2102,6 @@ For more information, see [microsoft/WSL#11794](https://github.com/microsoft/WSL {{< release-date date="2024-07-04" >}} -{{< desktop-install-v2 all=true beta_win_arm=true version="4.32.0" build_path="/157355/" >}} ### New @@ -848,7 +2124,7 @@ For more information, see [microsoft/WSL#11794](https://github.com/microsoft/WSL - Improved instructions for `watch` in the Compose File Viewer - Added support for Golang projects that don't have dependencies in Docker Init. 
Addresses [docker/roadmap#611](https://github.com/docker/roadmap/issues/611) -- [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md) now lets admins set the default value to `ProxyEnableKerberosNTLM`. +- [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md) now lets admins set the default value to `ProxyEnableKerberosNTLM`. - Removed a temporary compatibility fix for older versions of Visual Studio Code. - Builds view: - Changed icon for imported build record to a "files" icon. @@ -893,10 +2169,6 @@ For more information, see [microsoft/WSL#11794](https://github.com/microsoft/WSL ## 4.31.1 -{{< release-date date="2024-06-10" >}} - -{{< desktop-install win=true beta_win_arm=true version="4.31.1" build_path="/153621/" >}} - ### Bug fixes and enhancements #### For Windows @@ -905,13 +2177,9 @@ For more information, see [microsoft/WSL#11794](https://github.com/microsoft/WSL ## 4.31.0 -{{< release-date date="2024-06-06" >}} - -{{< desktop-install all=true beta_win_arm=true version="4.31.0" build_path="/153195/" >}} - ### New -- [Air-Gapped Containers](/manuals/security/for-admins/hardened-desktop/air-gapped-containers.md) is now generally available. +- [Air-Gapped Containers](/manuals/enterprise/security/hardened-desktop/air-gapped-containers.md) is now generally available. - Docker Compose File Viewer shows your Compose YAML with syntax highlighting and contextual links to relevant docs (Beta, progressive rollout). - New Sidebar user experience. @@ -935,7 +2203,7 @@ For more information, see [microsoft/WSL#11794](https://github.com/microsoft/WSL - Added `proxyEnableKerberosNTLM` config to `settings.json` to enable fallback to basic proxy authentication if Kerberos/NTLM environment is not properly set up. - Fixed a bug where Docker Debug was not working properly with Enhanced Container Isolation enabled. - Fixed a bug where UDP responses were not truncated properly. -- Fixed a bug where the **Update** screen was hidden when using [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md). +- Fixed a bug where the **Update** screen was hidden when using [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md). - Fixed a bug where proxy settings defined in `admin-settings.json` were not applied correctly on startup. - Fixed a bug where the **Manage Synchronized file shares with Compose** toggle did not correctly reflect the value with the feature. - Fixed a bug where a bind mounted file modified on host is not updated after the container restarts, when gRPC FUSE file sharing is used on macOS and on Windows with Hyper-V. Fixes [docker/for-mac#7274](https://github.com/docker/for-mac/issues/7274), [docker/for-win#14060](https://github.com/docker/for-win/issues/14060). @@ -993,14 +2261,12 @@ For more information, see [microsoft/WSL#11794](https://github.com/microsoft/WSL {{< release-date date="2024-05-06" >}} -{{< desktop-install all=true beta_win_arm=true version="4.30.0" build_path="/149282/" >}} - ### New #### For all platforms - Docker Desktop now supports [SOCKS5 proxies](/manuals/desktop/features/networking.md#socks5-proxy-support). Requires a Business subscription. -- Added a new setting to manage the onboarding survey in [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md). 
+- Added a new setting to manage the onboarding survey in [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md). #### For Windows @@ -1075,18 +2341,16 @@ This can be resolved by adding the user to the **docker-users** group. Before st {{< release-date date="2024-04-08" >}} -{{< desktop-install all=true beta_win_arm=true version="4.29.0" build_path="/145265/" >}} - ### New -- You can now enforce Rosetta usage via [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md). -- [Docker socket mount restrictions](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md) with ECI is now generally available. +- You can now enforce Rosetta usage via [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md). +- [Docker socket mount restrictions](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md) with ECI is now generally available. - Docker Engine and CLI updated to [Moby 26.0](https://github.com/moby/moby/releases/tag/v26.0.0). This includes Buildkit 0.13, sub volumes mounts, networking updates, and improvements to the containerd multi-platform image store UX. - New and improved Docker Desktop error screens: swift troubleshooting, easy diagnostics uploads, and actionable remediation. - Compose supports [Synchronized file shares (experimental)](/manuals/desktop/features/synchronized-file-sharing.md). - New [interactive Compose CLI (experimental)](/manuals/compose/how-tos/environment-variables/envvars.md#compose_menu). - Beta release of: - - Air-Gapped Containers with [Settings Management](/manuals/security/for-admins/hardened-desktop/air-gapped-containers/_index.md). + - Air-Gapped Containers with [Settings Management](/manuals/enterprise/security/hardened-desktop/air-gapped-containers.md). - [Host networking](/manuals/engine/network/drivers/host.md#docker-desktop) in Docker Desktop. - [Docker Debug](use-desktop/container.md#integrated-terminal) for running containers. - [Volumes Backup & Share extension](use-desktop/volumes.md) functionality available in the **Volumes** tab. @@ -1157,7 +2421,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st ### New -- [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md) now allows admins to set the default file-sharing implementation and specify which paths developer can add file shares to. +- [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md) now allows admins to set the default file-sharing implementation and specify which paths developer can add file shares to. - Added support for `socks5://` HTTP and HTTPS proxy URLs when the [`SOCKS` proxy support beta feature](/manuals/desktop/features/networking.md) is enabled. - Users can now filter volumes to see which ones are in use in the **Volumes** tab. @@ -1276,9 +2540,9 @@ This can be resolved by adding the user to the **docker-users** group. Before st - Docker init now supports Java and is generally available to all users. - [Synchronized File Shares](/manuals/desktop/features/synchronized-file-sharing.md) provides fast and flexible host-to-VM file sharing within Docker Desktop. 
Utilizing the technology behind [Docker’s acquisition of Mutagen](https://www.docker.com/blog/mutagen-acquisition/), this feature provides an alternative to virtual bind mounts that uses synchronized filesystem caches, improving performance for developers working with large codebases. -- Organization admins can now [configure Docker socket mount permissions](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md) when ECI is enabled. +- Organization admins can now [configure Docker socket mount permissions](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md) when ECI is enabled. - [Containerd Image Store](/manuals/desktop/features/containerd.md) support is now generally available to all users. -- Get a debug shell into any container or image with the new [`docker debug` command](/reference/cli/docker/debug.md) (Beta). +- Get a debug shell into any container or image with the new [`docker debug` command](/reference/cli/docker/debug/) (Beta). - Organization admins, with a Docker Business subscription, can now configure a custom list of extensions with [Private Extensions Marketplace](/manuals/extensions/private-marketplace.md) enabled (Beta) ### Upgrades @@ -1297,7 +2561,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st #### For all platforms -- The `docker scan` command has been removed. To continue learning about the vulnerabilities of your images, and many other features, use the [`docker scout` command](/reference/cli/docker/scout/_index.md). +- The `docker scan` command has been removed. To continue learning about the vulnerabilities of your images, and many other features, use the [`docker scout` command](/reference/cli/docker/scout/). - Fixed a bug where automatic updates would not download when the **Always download updates** checkbox was selected. - Fixed typo in the dashboard tooltip. Fixes [docker/for-mac#7132](https://github.com/docker/for-mac/issues/7132) - Improved signal handling behavior (e.g. when pressing Ctrl-C in the terminal while running a `docker` command). @@ -1377,7 +2641,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st ### New -- Administrators can now control access to beta and experimental features in the **Features in development** tab with [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md). +- Administrators can now control access to beta and experimental features in the **Features in development** tab with [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md). - Introduced four new version update states in the footer. - `docker init` (Beta) now supports PHP with Apache + Composer. - The [**Builds** view](use-desktop/builds.md) is now GA. You can now inspect builds, troubleshoot errors, and optimize build speed. @@ -1487,7 +2751,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st - Rosetta is now Generally Available for all users on macOS 13 or later. It provides faster emulation of Intel-based images on Apple Silicon. To use Rosetta, see [Settings](/manuals/desktop/settings-and-maintenance/settings.md). Rosetta is enabled by default on macOS 14.1 and later. - Docker Desktop now detects if a WSL version is out of date. If an out dated version of WSL is detected, you can allow Docker Desktop to automatically update the installation or you can manually update WSL outside of Docker Desktop. 
- New installations of Docker Desktop for Windows now require a Windows version of 19044 or later. -- Administrators now have the ability to control Docker Scout image analysis in [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md). +- Administrators now have the ability to control Docker Scout image analysis in [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md). ### Upgrades @@ -1566,8 +2830,6 @@ This can be resolved by adding the user to the **docker-users** group. Before st {{< release-date date="2023-10-04" >}} -{{< desktop-install win=true version="4.24.1" build_path="/123237/" >}} - ### Bug fixes and enhancements #### For Windows @@ -1657,12 +2919,12 @@ This can be resolved by adding the user to the **docker-users** group. Before st ### New - Added support for new Wasm runtimes: wws and lunatic. -- [`docker init`](/reference/cli/docker/init.md) now supports ASP.NET +- [`docker init`](/reference/cli/docker/init/) now supports ASP.NET - Increased performance of exposed ports on macOS, for example with `docker run -p`. ### Removed -- Removed Compose V1 from Docker Desktop as it has stopped receiving updates. Compose V2 has replaced it and is now integrated into all current Docker Desktop versions. For more information, see [Migrate to Compose V2](/manuals/compose/releases/migrate.md). +- Removed Compose V1 from Docker Desktop as it has stopped receiving updates. Compose V2 has replaced it and is now integrated into all current Docker Desktop versions. ### Bug fixes and enhancements @@ -1750,7 +3012,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st #### For all platforms -- [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md) now lets you turn off Docker Extensions for your organisation. +- [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md) now lets you turn off Docker Extensions for your organisation. - Fixed a bug where turning on Kubernetes from the UI failed when the system was paused. - Fixed a bug where turning on Wasm from the UI failed when the system was paused. - Bind mounts are now shown when you [inspect a container](use-desktop/container.md). @@ -1908,7 +3170,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st ### Removed -- Removed `docker scan` command. To continue learning about the vulnerabilities of your images, and many other features, use the new `docker scout` command. Run `docker scout --help`, or [read the docs to learn more](/reference/cli/docker/scout/_index.md). +- Removed `docker scan` command. To continue learning about the vulnerabilities of your images, and many other features, use the new `docker scout` command. Run `docker scout --help`, or [read the docs to learn more](/reference/cli/docker/scout/). ### Upgrades @@ -1927,7 +3189,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st - Added more details to dashboard errors when a `docker-compose` action fails ([docker/for-win#13378](https://github.com/docker/for-win/issues/13378)). - Added support for setting HTTP proxy configuration during installation. 
This can be done via the `--proxy-http-mode`, `--overrider-proxy-http`, `--override-proxy-https` and `--override-proxy-exclude` installer flags in the case of installation from the CLI on [Mac](/manuals/desktop/setup/install/mac-install.md#install-from-the-command-line) and [Windows](/manuals/desktop/setup/install/windows-install.md#install-from-the-command-line), or alternatively by setting the values in the `install-settings.json` file. - Docker Desktop now stops overriding .docker/config.json `credsStore` keys on application start. Note that if you use a custom credential helper then the CLI `docker login` and `docker logout` does not affect whether the UI is signed in to Docker or not. In general, it is better to sign into Docker via the UI since the UI supports multi-factor authentication. -- Added a warning about the [forthcoming removal of Compose V1 from Docker Desktop](/manuals/compose/releases/migrate.md). Can be suppressed with `COMPOSE_V1_EOL_SILENT=1`. +- Added a warning about the forthcoming removal of Compose V1 from Docker Desktop. Can be suppressed with `COMPOSE_V1_EOL_SILENT=1`. - In the Compose config, boolean fields in YAML should be either `true` or `false`. Deprecated YAML 1.1 values such as “on” or “no” now produce a warning. - Improved UI for image table, allowing rows to use more available space. - Fixed various bugs in port-forwarding. @@ -2382,7 +3644,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st ### New -- Two new security features have been introduced for Docker Business users, Settings Management and Enhanced Container Isolation. Read more about Docker Desktop’s new [Hardened Docker Desktop security model](/manuals/security/for-admins/hardened-desktop/_index.md). +- Two new security features have been introduced for Docker Business users, Settings Management and Enhanced Container Isolation. Read more about Docker Desktop’s new [Hardened Docker Desktop security model](/manuals/enterprise/security/hardened-desktop/_index.md). - Added the new Dev Environments CLI `docker dev`, so you can create, list, and run Dev Envs via command line. Now it's easier to integrate Dev Envs into custom scripts. - Docker Desktop can now be installed to any drive and folder using the `--installation-dir`. Partially addresses [docker/roadmap#94](https://github.com/docker/roadmap/issues/94). @@ -3079,7 +4341,7 @@ Installing Docker Desktop 4.5.0 from scratch has a bug which defaults Docker Des ### New - Easy, Secure sign in with Auth0 and Single Sign-on - - Single Sign-on: Users with a Docker Business subscription can now configure SSO to authenticate using their identity providers (IdPs) to access Docker. For more information, see [Single Sign-on](../security/for-admins/single-sign-on/_index.md). + - Single Sign-on: Users with a Docker Business subscription can now configure SSO to authenticate using their identity providers (IdPs) to access Docker. For more information, see [Single Sign-on](/manuals/enterprise/security/single-sign-on/_index.md). - Signing in to Docker Desktop now takes you through the browser so that you get all the benefits of auto-filling from password managers. ### Upgrades @@ -3205,7 +4467,7 @@ CVE-2021-44228](https://www.docker.com/blog/apache-log4j-2-cve-2021-44228/). Docker Desktop Dashboard incorrectly displays the container memory usage as zero on Hyper-V based machines. 
-You can use the [`docker stats`](/reference/cli/docker/container/stats.md) +You can use the [`docker stats`](/reference/cli/docker/container/stats/) command on the command line as a workaround to view the actual memory usage. See [docker/for-mac#6076](https://github.com/docker/for-mac/issues/6076). diff --git a/content/manuals/desktop/settings-and-maintenance/backup-and-restore.md b/content/manuals/desktop/settings-and-maintenance/backup-and-restore.md index 4ada95b5fda..6c6059d899e 100644 --- a/content/manuals/desktop/settings-and-maintenance/backup-and-restore.md +++ b/content/manuals/desktop/settings-and-maintenance/backup-and-restore.md @@ -8,15 +8,17 @@ aliases: - /desktop/backup-and-restore/ --- -Use this procedure to back up and restore your images and container data. This is useful if you want to reset your VM disk or to move your Docker environment to a new computer. +Use this procedure to back up and restore your images and container data. This is useful if you want to reset your VM disk or to move your Docker environment to a new computer, or recover from a failed Docker Desktop update or installation. > [!IMPORTANT] > > If you use volumes or bind-mounts to store your container data, backing up your containers may not be needed, but make sure to remember the options that were used when creating the container or use a [Docker Compose file](/reference/compose-file/_index.md) if you want to re-create your containers with the same configuration after re-installation. -## Save your data +## If Docker Desktop is functioning normally -1. Commit your containers to an image with [`docker container commit`](/reference/cli/docker/container/commit.md). +### Save your data + +1. Commit your containers to an image with [`docker container commit`](/reference/cli/docker/container/commit/). Committing a container stores filesystem changes and some container configurations, such as labels and environment variables, as a local image. Be aware that environment variables may contain sensitive information such as passwords or proxy-authentication, so take care when pushing the resulting image to a registry. @@ -26,20 +28,20 @@ Use this procedure to back up and restore your images and container data. This i If you used a [named volume](/manuals/engine/storage/_index.md#more-details-about-mount-types) to store container data, such as databases, refer to the [back up, restore, or migrate data volumes](/manuals/engine/storage/volumes.md#back-up-restore-or-migrate-data-volumes) page in the storage section. -2. Use [`docker push`](/reference/cli/docker/image/push.md) to push any +2. Use [`docker push`](/reference/cli/docker/image/push/) to push any images you have built locally and want to keep to the [Docker Hub registry](/manuals/docker-hub/_index.md). > [!TIP] > > [Set the repository visibility to private](/manuals/docker-hub/repos/_index.md) if your image includes sensitive content. - Alternatively, use [`docker image save -o images.tar image1 [image2 ...]`](/reference/cli/docker/image/save.md) + Alternatively, use [`docker image save -o images.tar image1 [image2 ...]`](/reference/cli/docker/image/save/) to save any images you want to keep to a local `.tar` file. After backing up your data, you can uninstall the current version of Docker Desktop and [install a different version](/manuals/desktop/release-notes.md) or reset Docker Desktop to factory defaults. -## Restore your data +### Restore your data 1. Load your images. 
@@ -55,7 +57,69 @@ and [install a different version](/manuals/desktop/release-notes.md) or reset Do $ docker image load -i images.tar ``` -2. Re-create your containers if needed, using [`docker run`](/reference/cli/docker/container/run.md), +2. Re-create your containers if needed, using [`docker run`](/reference/cli/docker/container/run/), or [Docker Compose](/manuals/compose/_index.md). To restore volume data, refer to [backup, restore, or migrate data volumes](/manuals/engine/storage/volumes.md#back-up-restore-or-migrate-data-volumes). + +## If Docker Desktop fails to start + +If Docker Desktop cannot launch and must be reinstalled, you can back up its VM disk and image data directly from disk. Docker Desktop must be fully stopped before backing up these files. + +{{< tabs >}} +{{< tab name="Windows" >}} + +1. Back up Docker containers/images. + + Backup the following file: + + ```console + %LOCALAPPDATA%\Docker\wsl\data\docker_data.vhdx + ``` + + Copy it to a safe location. + +1. Back up WSL distributions. + + If you're running any WSL Linux distributions (Ubuntu, Alpine, etc.), back them up using [Microsoft's guide](https://learn.microsoft.com/en-us/windows/wsl/faq#how-can-i-back-up-my-wsl-distributions-). + +1. Restore. + + After reinstalling Docker Desktop, restore the `docker_data.vhdx` to the same location and re-import your WSL distributions if needed. + +{{< /tab >}} +{{< tab name="Mac" >}} + +1. Back up Docker containers/images. + + Backup the following file: + + ```console + ~/Library/Containers/com.docker.docker/Data/vms/0/data/Docker.raw + ``` + + Copy it to a safe location. + +1. Restore. + + After reinstalling Docker Desktop, restore the `Docker.raw` to the same location. + +{{< /tab >}} +{{< tab name="Linux" >}} + +1. Back up Docker containers/images: + + Backup the following file: + + ```console + ~/.docker/desktop/vms/0/data/Docker.raw + ``` + + Copy it to a safe location. + +1. Restore. + + After reinstalling Docker Desktop, restore the `Docker.raw` to the same location. + +{{< /tab >}} +{{< /tabs >}} \ No newline at end of file diff --git a/content/manuals/desktop/settings-and-maintenance/settings.md b/content/manuals/desktop/settings-and-maintenance/settings.md index 662d07d83df..8dc7909e47f 100644 --- a/content/manuals/desktop/settings-and-maintenance/settings.md +++ b/content/manuals/desktop/settings-and-maintenance/settings.md @@ -12,349 +12,186 @@ aliases: weight: 10 --- -To navigate to **Settings** either: +Customize Docker Desktop behavior and optimize performance and resource usage with Docker Desktop's settings. + +To open **Settings** either: - Select the Docker menu {{< inline-image src="../images/whale-x.svg" alt="whale menu" >}} and then **Settings** - Select the **Settings** icon from the Docker Desktop Dashboard. -You can also locate the `settings-store.json` file (or `settings.json` for Docker Desktop versions 4.34 and earlier) at: +You can also locate the `settings-store.json` file at: - Mac: `~/Library/Group\ Containers/group.com.docker/settings-store.json` - Windows: `C:\Users\[USERNAME]\AppData\Roaming\Docker\settings-store.json` - Linux: `~/.docker/desktop/settings-store.json` -## General - -On the **General** tab, you can configure when to start Docker and specify other settings: - -- **Start Docker Desktop when you sign in to your computer**. Select to automatically start Docker - Desktop when you sign in to your machine. - -- **Open Docker Dashboard when Docker Desktop starts**. 
Select to automatically open the - dashboard when starting Docker Desktop. - -- **Choose theme for Docker Desktop**. Choose whether you want to apply a **Light** or **Dark** theme to Docker Desktop. Alternatively you can set Docker Desktop to **Use system settings**. - -- **Configure shell completions**. Automatically edits your shell configuration and gives you word completion for commands, flags, and Docker objects (such as container and volume names) when you hit `` as you type into your terminal. For more information, see [Completion](/manuals/engine/cli/completion.md). - -- **Choose container terminal**. Determines which terminal is launched when opening the terminal from a container. -If you choose the integrated terminal, you can run commands in a running container straight from the Docker Desktop Dashboard. For more information, see [Explore containers](/manuals/desktop/use-desktop/container.md). - -- **Enable Docker terminal**. Interact with your host machine and execute commands directly from Docker Desktop. - -- **Enable Docker Debug by default**. Check this option to use Docker Debug by default when accessing the integrated terminal. For more information, see [Explore containers](/manuals/desktop/use-desktop/container.md#integrated-terminal). - -- {{< badge color=blue text="Mac only" >}}**Include VM in Time Machine backups**. Select to back up the Docker Desktop - virtual machine. This option is turned off by default. - -- **Use containerd for pulling and storing images**. - Turns on the containerd image store. - This brings new features like faster container startup performance by lazy-pulling images, - and the ability to run Wasm applications with Docker. - For more information, see [containerd image store](/manuals/desktop/features/containerd.md). - -- {{< badge color=blue text="Windows only" >}}**Expose daemon on tcp://localhost:2375 without TLS**. Check this option to - enable legacy clients to connect to the Docker daemon. You must use this option - with caution as exposing the daemon without TLS can result in remote code - execution attacks. - -- {{< badge color=blue text="Windows only" >}}**Use the WSL 2 based engine**. WSL 2 provides better performance than the - Hyper-V backend. For more information, see [Docker Desktop WSL 2 backend](/manuals/desktop/features/wsl/_index.md). - -- {{< badge color=blue text="Windows only" >}}**Add the `*.docker.internal` names to the host's `/etc/hosts` file (Password required)**. Lets you resolve `*.docker.internal` DNS names from both the host and your containers. - -- {{< badge color=blue text="Mac only" >}} **Choose Virtual Machine Manager (VMM)**. Choose the Virtual Machine Manager for creating and managing the Docker Desktop Linux VM. - - Select **Docker VMM** for the latest and most performant Hypervisor/Virtual Machine Manager. This option is available only on Apple Silicon Macs running macOS 12.5 or later and is currently in Beta. - > [!TIP] - > - > Turn this setting on to make Docker Desktop run faster. - - Alternatively, you can choose **Apple Virtualization framework**, **QEMU** (for Apple Silicon), or **HyperKit** (for Intel Macs). For macOS 12.5 and later, Apple Virtualization framework is the default setting. - - For more information, see [Virtual Machine Manager](/manuals/desktop/features/vmm.md). - -- {{< badge color=blue text="Mac only" >}}**Choose file sharing implementation for your containers**. Choose whether you want to share files using **VirtioFS**, **gRPC FUSE**, or **osxfs (Legacy)**. 
VirtioFS is only available for macOS 12.5 and later, and is turned on by default. - > [!TIP] - > - > Use VirtioFS for speedy file sharing. VirtioFS has reduced the time taken to complete filesystem operations by [up to 98%](https://github.com/docker/roadmap/issues/7#issuecomment-1044452206). It is the only file sharing implementation supported by Docker VMM. - -- {{< badge color=blue text="Mac only" >}}**Use Rosetta for x86_64/amd64 emulation on Apple Silicon**. Turns on Rosetta to accelerate x86/AMD64 binary emulation on Apple Silicon. This option is only available if you have selected **Apple Virtualization framework** as the Virtual Machine Manager. You must also be on macOS 13 or later. - -- **Send usage statistics**. Select so Docker Desktop sends diagnostics, - crash reports, and usage data. This information helps Docker improve and - troubleshoot the application. Clear the checkbox to opt out. Docker may - periodically prompt you for more information. - -- **Use Enhanced Container Isolation**. Select to enhance security by preventing containers from breaching the Linux VM. For more information, see [Enhanced Container Isolation](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md). - > [!NOTE] - > - > This setting is only available if you are signed in to Docker Desktop and have a Docker Business subscription. - -- **Show CLI hints**. Displays CLI hints and tips when running Docker commands in the CLI. This is turned on by default. To turn CLI hints on or off from the CLI, set `DOCKER_CLI_HINTS` to `true` or `false` respectively. - -- **Enable Scout image analysis**. When this option is enabled, inspecting an image in Docker Desktop shows a **Start analysis** button that, when selected, analyzes the image with Docker Scout. +For information on enforcing settings at an organization level, see [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/settings-reference.md). -- **Enable background SBOM indexing**. When this option is enabled, Docker Scout automatically analyzes images that you build or pull. - -- {{< badge color=blue text="Mac only" >}}**Automatically check configuration**. Regularly checks your configuration to ensure no unexpected changes have been made by another application. +## General - Docker Desktop checks if your setup, configured during installation, has been altered by external apps like Orbstack. Docker Desktop checks: - - The symlinks of Docker binaries to `/usr/local/bin`. - - The symlink of the default Docker socket. - Additionally, Docker Desktop ensures that the context is switched to `desktop-linux` on startup. - - You are notified if changes are found and are able to restore the configuration directly from the notification. For more information, see the [FAQs](/manuals/desktop/troubleshoot-and-support/faqs/macfaqs.md#why-do-i-keep-getting-a-notification-telling-me-an-application-has-changed-my-desktop-configurations). +Configure startup behavior, UI appearance, terminal preferences, and feature defaults for Docker Desktop. + +| Setting | Description | Default | Platform | Notes | +| ----------------------------------------------------------------- | -------------------------------------------------------------------------- | ------------------ | ------------ | ------------------------------------- | +| **Start Docker Desktop when you sign in to your computer** | Automatically start Docker Desktop when you sign in to your machine. | Disabled | All | Recommended for frequent users. 
|
+| **Open Docker Dashboard when Docker Desktop starts** | Automatically open the dashboard when starting Docker Desktop. | Disabled | All | |
+| **Choose theme for Docker Desktop** | Apply a **Light** or **Dark** theme to Docker Desktop. | **Use system settings** | All | |
+| **Configure shell completions** | Edits your shell configuration to enable word completion for commands, flags, and Docker objects when you press `Tab` in your terminal. For more information, see [Completion](/manuals/engine/cli/completion.md). | Disabled | All | |
+| **Choose container terminal** | Sets which terminal opens when you open a terminal from a container. Use the integrated terminal to run commands in a running container from the Dashboard. For more information, see [Explore containers](/manuals/desktop/use-desktop/container.md). | Disabled | All | |
+| **Enable Docker terminal** | Interact with your host machine and execute commands directly from Docker Desktop. | Disabled | All | |
+| **Enable Docker Debug by default** | Use Docker Debug by default when opening the integrated terminal. For more information, see [Explore containers](/manuals/desktop/use-desktop/container.md#integrated-terminal). | Disabled | All | |
+| **Include VM in Time Machine backups** | Back up the Docker Desktop virtual machine. | Disabled | Mac | |
+| **Use containerd for pulling and storing images** | Uses the containerd image store instead of the classic image store. For more information, see [containerd image store](/manuals/desktop/features/containerd.md). | Enabled | All | |
+| **Expose daemon on tcp://localhost:2375 without TLS** | Allow legacy clients to connect to the Docker daemon. Use with caution as exposing the daemon without TLS can result in remote code execution attacks. | Disabled | Windows (Hyper-V backend only) | |
+| **Use the WSL 2 based engine** | WSL 2 provides better performance than the Hyper-V backend. For more information, see [Docker Desktop WSL 2 backend](/manuals/desktop/features/wsl/_index.md). | Disabled | Windows | |
+| **Add \*.docker.internal to hosts file** | Adds `*.docker.internal` entries to the host's `/etc/hosts` file so they resolve from both the host and your containers. | Enabled | Windows | Password required. |
+| **Choose Virtual Machine Manager (VMM)** | Choose the VMM for creating and managing the Docker Desktop Linux VM. For more information, see [Virtual Machine Manager](/manuals/desktop/features/vmm.md). | | Mac | Select **Docker VMM** for the latest and most performant Hypervisor/Virtual Machine Manager. This option is available only on Apple Silicon Macs and is in Beta. |
+| **Choose file sharing implementation for your containers** | Choose whether you want to share files using **VirtioFS**, **gRPC FUSE**, or **osxfs (Legacy)**. | **VirtioFS** | Mac | Use VirtioFS for speedy file sharing. VirtioFS has reduced the time taken to complete filesystem operations by [up to 98%](https://github.com/docker/roadmap/issues/7#issuecomment-1044452206). It is the only file sharing implementation supported by Docker VMM. |
+| **Use Rosetta for x86_64/amd64 emulation on Apple Silicon** | Accelerate x86/AMD64 binary emulation on Apple Silicon. This option is only available if you have selected **Apple Virtualization framework** as the Virtual Machine Manager. | Disabled | Mac | |
+| **Send usage statistics** | Send diagnostics, crash reports, and usage data to Docker to improve and troubleshoot the application. Docker may periodically prompt you for more information. | Enabled | All | |
+| **Use Enhanced Container Isolation** | Prevent containers from breaching the Linux VM.
For more information, see [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md). | Disabled | All | Must be signed in and have a Docker Business subscription. | +| **Show CLI hints** | Display helpful CLI suggestions in terminal. | Enabled | All | Improves discoverability | +| **Enable Docker Scout image analysis** | Show a **Start analysis** button when inspecting an image, which analyzes the image with Docker Scout. | Enabled | All | | +| **Enable background SBOM indexing** | Automatically analyze images that you build or pull. | Disabled | All | | +| **Automatically check configuration** | Regularly check your configuration to ensure no unexpected changes have been made by another application. Notifies you if changes are found with the option to restore the configuration directly from the notification. For more information, see the [FAQs](/manuals/desktop/troubleshoot-and-support/faqs/macfaqs.md#why-do-i-keep-getting-a-notification-telling-me-an-application-has-changed-my-desktop-configurations). | Enabled | Mac | Docker Desktop checks if your setup, configured during installation, has been altered by external apps like Orbstack. Docker Desktop checks the symlinks of Docker binaries to `/usr/local/bin` and the symlink of the default Docker socket. Additionally, Docker Desktop ensures that the context is switched to `desktop-linux` on startup. | ## Resources -The **Resources** tab allows you to configure CPU, memory, disk, proxies, -network, and other resources. +Control the CPU, memory, disk, file sharing, proxy, and network resources available to Docker Desktop. ### Advanced -> [!NOTE] -> -> On Windows, the **Resource allocation** options in the **Advanced** tab are only available in Hyper-V mode, because Windows manages -> the resources in WSL 2 mode and Windows container mode. In WSL 2 -> mode, you can configure limits on the memory, CPU, and swap size allocated -> to the [WSL 2 utility VM](https://docs.microsoft.com/en-us/windows/wsl/wsl-config#configure-global-options-with-wslconfig). - -On the **Advanced** tab, you can limit resources available to the Docker Linux VM. - -Advanced settings are: - -- **CPU limit**. Specify the maximum number of CPUs to be used by Docker Desktop. - By default, Docker Desktop is set to use all the processors available on the host machine. - -- **Memory limit**. By default, Docker Desktop is set to use up to 50% of your host's - memory. To increase the RAM, set this to a higher number; to decrease it, - lower the number. - -- **Swap**. Configure swap file size as needed. The default is 1 GB. - -- **Disk usage limit**. Specify the maximum amount of disk space the engine can use. - -- **Disk image location**. Specify the location of the Linux volume where containers and images are stored. +| Setting | Description | Platform | Notes | +| ------------------- | ----------------------------------------- | -------- | ------------------------------------- | +| **CPU limit** | Specify the maximum number of CPUs to be used by Docker Desktop. | Mac, Linux, Windows Hyper-V | | +| **Memory limit** | RAM allocated to the Docker VM | Mac, Linux, Windows Hyper-V | Defaults to 50% of your host's memory. | +| **Swap** | Configure swap file size as needed. | Mac, Linux, Windows Hyper-V | 1 GB default. | +| **Disk usage limit** | Specify the maximum amount of disk space the engine can use. 
| Mac, Linux, Windows Hyper-V | | +| **Disk image location** | Specify the location of the Linux volume where containers and images are stored. On the **Advanced** tab, you can limit resources available to the Docker Linux VM. | Mac, Linux, Windows Hyper-V | You can also move the disk image to a different location. If you attempt to move a disk image to a location that already has one, you are asked if you want to use the existing image or replace it. | +| **Resource Saver** | Enable or disable [Resource Saver mode](/manuals/desktop/use-desktop/resource-saver.md), which significantly reduces CPU and memory utilization on the host by automatically turning off the Linux VM when Docker Desktop is idle. | Mac, Linux, Windows Hyper-V | Restarts automatically when containers run. Restart may take 3–10 seconds. | - You can also move the disk image to a different location. If you attempt to - move a disk image to a location that already has one, you are asked if you - want to use the existing image or replace it. +In WSL 2 mode, configure memory, CPU, and swap limits on the [WSL 2 utility VM](https://docs.microsoft.com/en-us/windows/wsl/wsl-config#configure-global-options-with-wslconfig). ->[!TIP] +> [!TIP] > > If you feel Docker Desktop starting to get slow or you're running -> multi-container workloads, increase the memory and disk image space allocation - -- **Resource Saver**. Enable or disable [Resource Saver mode](/manuals/desktop/use-desktop/resource-saver.md), - which significantly reduces CPU and memory utilization on the host by - automatically turning off the Linux VM when Docker Desktop is idle (i.e., no - containers are running). - - You can also configure the Resource Saver timeout which indicates how long - should Docker Desktop be idle before Resource Saver mode kicks in. Default is - 5 minutes. - - > [!NOTE] - > - > Exit from Resource Saver mode occurs automatically when containers run. Exit - > may take a few seconds (~3 to 10 secs) as Docker Desktop restarts the Linux VM. - +> multi-container workloads, increase the memory and disk image space allocation. ### File sharing -> [!NOTE] -> -> On Windows, the **File sharing** tab is only available in Hyper-V mode because the files -> are automatically shared in WSL 2 mode and Windows container mode. - Use File sharing to allow local directories on your machine to be shared with Linux containers. This is especially useful for editing source code in an IDE on the host while running and testing the code in a container. -#### Synchronized file shares - -Synchronized file shares is an alternative file sharing mechanism that provides fast and flexible host-to-VM file sharing, enhancing bind mount performance through the use of synchronized filesystem caches. Available with Pro, Team, and Business subscriptions. - -To learn more, see [Synchronized file share](/manuals/desktop/features/synchronized-file-sharing.md). - -#### Virtual file shares - -By default the `/Users`, `/Volumes`, `/private`, `/tmp` and `/var/folders` directory are shared. -If your project is outside this directory then it must be added to the list, -otherwise you may get `Mounts denied` or `cannot start service` errors at runtime. - -File share settings are: - -- **Add a Directory**. Select `+` and navigate to the directory you want to add. - -- **Remove a Directory**. Select `-` next to the directory you want to remove - -- **Apply & Restart** makes the directory available to containers using Docker's - bind mount (`-v`) feature. 
- -> [!TIP] -> -> * Share only the directories that you need with the container. File sharing -> introduces overhead as any changes to the files on the host need to be notified -> to the Linux VM. Sharing too many files can lead to high CPU load and slow -> filesystem performance. -> * Shared folders are designed to allow application code to be edited -> on the host while being executed in containers. For non-code items -> such as cache directories or databases, the performance will be much -> better if they are stored in the Linux VM, using a [data volume](/manuals/engine/storage/volumes.md) -> (named volume) or [data container](/manuals/engine/storage/volumes.md). -> * If you share the whole of your home directory into a container, MacOS may -> prompt you to give Docker access to personal areas of your home directory such as -> your Reminders or Downloads. -> * By default, Mac file systems are case-insensitive while Linux is case-sensitive. -> On Linux, it is possible to create two separate files: `test` and `Test`, -> while on Mac these filenames would actually refer to the same underlying -> file. This can lead to problems where an app works correctly on a developer's -> machine (where the file contents are shared) but fails when run in Linux in -> production (where the file contents are distinct). To avoid this, Docker Desktop -> insists that all shared files are accessed as their original case. Therefore, -> if a file is created called `test`, it must be opened as `test`. Attempts to -> open `Test` will fail with the error "No such file or directory". Similarly, -> once a file called `test` is created, attempts to create a second file called -> `Test` will fail. -> -> For more information, see [Volume mounting requires file sharing for any project directories outside of `/Users`](/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md) - -#### Shared folders on demand - -On Windows, you can share a folder "on demand" the first time a particular folder is used by a container. - -If you run a Docker command from a shell with a volume mount (as shown in the -example below) or kick off a Compose file that includes volume mounts, you get a -popup asking if you want to share the specified folder. - -You can select to **Share it**, in which case it is added to your Docker Desktop Shared Folders list and available to -containers. Alternatively, you can opt not to share it by selecting **Cancel**. - -![Shared folder on demand](../images/shared-folder-on-demand.png) +| Setting | Description | Platform | Notes | +| ------------------- | ----------------------------------------- | -------- | ------------------------------------- | +| **Synchronized file shares** | Fast and flexible host-to-VM file sharing, enhancing bind mount performance through the use of synchronized filesystem caches. To learn more, see [Synchronized file share](/manuals/desktop/features/synchronized-file-sharing.md). | Mac, Linux, Windows Hyper-V | Available with Pro, Team, and Business subscriptions. | +| **Virtual file shares** | Share local directories with Linux containers. By default, the `/Users`, `/Volumes`, `/private`, `/tmp`, and `/var/folders` directories are shared. If your project is outside these directories, it must be added to the list; otherwise you may get `Mounts denied` or `cannot start service` errors at runtime. | Mac, Linux, Windows Hyper-V | | + + +- Share only the directories that you need with the container.
File sharing +introduces overhead as any changes to the files on the host need to be notified +to the Linux VM. Sharing too many files can lead to high CPU load and slow +filesystem performance. +- Shared folders are designed to allow application code to be edited +on the host while being executed in containers. For non-code items +such as cache directories or databases, the performance will be much +better if they are stored in the Linux VM, using a [data volume](/manuals/engine/storage/volumes.md) +(named volume) or [data container](/manuals/engine/storage/volumes.md). +- If you share the whole of your home directory into a container, macOS may +prompt you to give Docker access to personal areas of your home directory such as +your Reminders or Downloads. +- By default, Mac file systems are case-insensitive while Linux is case-sensitive. +On Linux, it is possible to create two separate files: `test` and `Test`, +while on Mac these filenames would actually refer to the same underlying +file. This can lead to problems where an app works correctly on a developer's +machine (where the file contents are shared) but fails when run in Linux in +production (where the file contents are distinct). To avoid this, Docker Desktop +insists that all shared files are accessed as their original case. Therefore, +if a file is created called `test`, it must be opened as `test`. Attempts to +open `Test` will fail with the error "No such file or directory". Similarly, +once a file called `test` is created, attempts to create a second file called +`Test` will fail. + +For more information, see [Volume mounting requires file sharing for any project directories outside of `/Users`](/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md). ### Proxies -Docker Desktop supports the use of HTTP/HTTPS and [SOCKS5 proxies](/manuals/desktop/features/networking.md#socks5-proxy-support). - -HTTP/HTTPS proxies can be used when: +Docker Desktop supports HTTP/HTTPS and SOCKS5 proxies. SOCKS5 requires a Business subscription. -- Signing in to Docker -- Pulling or pushing images -- Fetching artifacts during image builds -- Containers interact with the external network -- Scanning images - -If the host uses a HTTP/HTTPS proxy configuration (static or via Proxy Auto-Configuration (PAC)), Docker Desktop reads -this configuration -and automatically uses these settings for signing in to Docker, for pulling and pushing images, and for -container Internet access. If the proxy requires authorization then Docker Desktop dynamically asks -the developer for a username and password. All passwords are stored securely in the OS credential store. -Note that only the `Basic` proxy authentication method is supported so we recommend using an `https://` -URL for your HTTP/HTTPS proxies to protect passwords while in transit on the network. Docker Desktop -supports TLS 1.3 when communicating with proxies. +To prevent developers from accidentally changing the proxy settings, see +[Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md#what-features-can-i-configure-with-settings-management). -To set a different proxy for Docker Desktop, turn on **Manual proxy configuration** and enter a single -upstream proxy URL of the form `http://proxy:port` or `https://proxy:port`.
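+To confirm which proxy values the engine has actually picked up after changing these settings, one quick check (a sketch; proxy fields only appear in the output when a proxy is configured) is to filter `docker info`:
+
+```console
+$ docker info | grep -i proxy
+```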
+#### Docker Desktop proxy -To prevent developers from accidentally changing the proxy settings, see -[Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md#what-features-can-i-configure-with-settings-management). +Used for signing in to Docker, pulling and pushing images, fetching artifacts during image builds, and reporting error diagnostics. -The HTTPS proxy settings used for scanning images are set using the `HTTPS_PROXY` environment variable. +| Proxy mode | Description | +|------------|-------------| +| **System proxy** | Use the proxy configured on the host (static or Proxy Auto-Configuration (PAC)). Docker Desktop reads this automatically. | +| **No proxy** | Connect directly without a proxy. | +| **Manual configuration** | Enter a **Web Server (HTTP)** and **Secure Web Server (HTTPS)** URL manually. Use the format `http://proxy:port` or `https://proxy:port`. You can also specify hosts and domains that should bypass the proxy, for example: `registry-1.docker.com,*.docker.com,10.0.0.0/8`. | > [!NOTE] > -> If you are using a PAC file hosted on a web server, make sure to add the MIME type `application/x-ns-proxy-autoconfig` for the `.pac` file extension on the server or website. Without this configuration, the PAC file may not be parsed correctly. - -> [!IMPORTANT] -> You cannot configure the proxy settings using the Docker daemon configuration -> file (`daemon.json`), and we recommend you do not configure the proxy -> settings via the Docker CLI configuration file (`config.json`). -> -> To manage proxy configurations for Docker Desktop, configure the settings in -> the Docker Desktop app or use [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md). - -#### Proxy authentication - -##### Basic authentication +> If you use a PAC file hosted on a web server, add the MIME type `application/x-ns-proxy-autoconfig` for the `.pac` extension. Without this, the PAC file may not parse correctly. See [Hardened Docker Desktop](/manuals/enterprise/security/hardened-desktop/air-gapped-containers.md#proxy-auto-configuration-files). -If your proxy uses Basic authentication, Docker Desktop prompts developers for a username and password and caches the credentials. All passwords are stored securely in the OS credential store. It will request re-authentication if that cache is removed. +#### Containers proxy -It's recommended that you use an `https://` URL for HTTP/HTTPS proxies to protect passwords during network transit. Docker Desktop also supports TLS 1.3 for communication with proxies. +Used for outbound traffic from running containers. -##### Kerberos and NTLM authentication +| Proxy mode | Description | +|------------|-------------| +| **Same as host proxy** | Use the same proxy configuration as the Docker Desktop proxy. | +| **System proxy** | Use the proxy configured on the host. | +| **No proxy** | Connect directly without a proxy. | +| **Manual configuration** | Enter a **Web Server (HTTP)** and **Secure Web Server (HTTPS)** URL manually. Use the format `http://proxy:port` or `https://proxy:port`. You can also specify hosts and domains that should bypass the proxy, for example: `registry-1.docker.com,*.docker.com,10.0.0.0/8`. | > [!NOTE] > -> Available for Docker Business subscribers with Docker Desktop for Windows version 4.30 and later. +> The HTTPS proxy used for image scanning is configured using the `HTTPS_PROXY` environment variable. 
-Developers are no longer interrupted by prompts for proxy credentials as authentication is centralized. This also reduces the risk of account lockouts due to incorrect sign in attempts. - -If your proxy offers multiple authentication schemes in 407 (Proxy Authentication Required) response, Docker Desktop by default selects the Basic authentication scheme. - -For Docker Desktop version 4.30 to 4.31: - -To enable Kerberos or NTLM proxy authentication, no additional configuration is needed beyond specifying the proxy IP address and port. - -For Docker Desktop version 4.32 and later: +#### Proxy authentication -To enable Kerberos or NTLM proxy authentication you must pass the `--proxy-enable-kerberosntlm` installer flag during installation via the command line, and ensure your proxy server is properly configured for Kerberos or NTLM authentication. +| Method | Behavior | Notes | +|--------|-----------| ----- | +| **Basic** | Docker Desktop prompts for credentials and caches them in the OS credential store. | Use an `https://` proxy URL to protect passwords in transit. Supports TLS 1.3. | +| **Kerberos / NTLM** | Centralizes authentication — developers aren't prompted for credentials, reducing the risk of account lockouts. If the proxy returns multiple schemes in a 407 response, Docker Desktop defaults to Basic. | Requires a Business subscription. To enable Kerberos or NTLM proxy authentication you must pass the `--proxy-enable-kerberosntlm` installer flag during installation via the command line, and ensure your proxy server is properly configured for Kerberos or NTLM authentication. | ### Network > [!NOTE] > -> On Windows, the **Network** tab isn't available in the Windows container mode because -> Windows manages networking. - -Docker Desktop uses a private IPv4 network for internal services such as a DNS server and an HTTP proxy. In case Docker Desktop's choice of subnet clashes with IPs in your environment, you can specify a custom subnet using the **Network** setting. +> On Windows, the **Network** tab is not available in Windows container mode because Windows manages networking. -On Mac, you can also select the **Use kernel networking for UDP** setting. This lets you use a more efficient kernel networking path for UDP. This may not be compatible with your VPN software. +| Setting | Description | Platform | +|---------|-------------|----------| +| **Docker subnet** | Set a custom subnet to avoid conflicts with IPs in your environment. Docker Desktop uses a private IPv4 network for internal services, including a DNS server and HTTP proxy. Default: `192.168.65.0/24`. | All | +| **Use kernel networking for UDP** | Use a more efficient kernel networking path for UDP traffic. May not be compatible with VPN software. | Mac | +| **Enable host networking** | Allows containers started with `--net=host` to use `localhost` to connect to TCP and UDP services on the host. Also allows host software to use `localhost` to connect to TCP and UDP services in the container. | Mac | -### WSL Integration +On Windows and Mac, you can also set the default networking mode and DNS resolution behavior. For more information, see [Networking](/manuals/desktop/features/networking/networking-how-tos.md#network-how-tos-for-mac-and-windows). -On Windows in WSL 2 mode, you can configure which WSL 2 distributions will have the Docker -WSL integration. +### WSL integration (Windows only) -By default, the integration is enabled on your default WSL distribution. 
-To change your default WSL distribution, run `wsl --set-default `. (For example, -to set Ubuntu as your default WSL distribution, run `wsl --set-default ubuntu`). - -You can also select any additional distributions you would like to enable the WSL 2 integration on. +| Setting | Description | Notes | +| ------------------- | ----------------------------------------- | ------------------------------------- | +| WSL distribution integration | Select which WSL 2 distributions have Docker WSL integration enabled. | Integration is enabled on your default WSL distribution by default. To change your default distribution, run `wsl --set-default <distro name>`. | For more details on configuring Docker Desktop to use WSL 2, see [Docker Desktop WSL 2 backend](/manuals/desktop/features/wsl/_index.md). ## Docker Engine -The **Docker Engine** tab allows you to configure the Docker daemon used to run containers with Docker Desktop. - -You configure the daemon using a JSON configuration file. Here's what the file might look like: - -```json -{ - "builder": { - "gc": { - "defaultKeepStorage": "20GB", - "enabled": true - } - }, - "experimental": false -} -``` +Configure the Docker daemon using a JSON configuration file. -You can find this file at `$HOME/.docker/daemon.json`. To change the configuration, either -edit the JSON configuration directly from the dashboard in Docker Desktop, or open and -edit the file using your favorite text editor. +The file is located at `$HOME/.docker/daemon.json`. Edit it directly in the Docker Desktop Dashboard or in a text editor. To see the full list of possible configuration options, see the [dockerd command reference](/reference/cli/dockerd/). -Select **Apply & Restart** to save your settings and restart Docker Desktop. - ## Builders -If you have turned on the -[Docker Desktop Builds view](/manuals/desktop/use-desktop/builds.md), you can use the -**Builders** tab to inspect and manage builders in the Docker Desktop settings. +Use the **Builders** tab to inspect and manage builders in the Docker Desktop settings. ### Inspect @@ -422,61 +259,41 @@ You can only start and stop builders using the `docker-container` driver. > > On Windows the **Kubernetes** tab is not available in Windows container mode. -Docker Desktop includes a standalone Kubernetes server, so that you can test -deploying your Docker workloads on Kubernetes. To turn on Kubernetes support and -install a standalone instance of Kubernetes running as a Docker container, -select **Enable Kubernetes**. - -With Docker Desktop version 4.38 and later, you can choose your cluster provisioning method: - **Kubeadm** creates a single-node cluster and the version is set by Docker Desktop. - **kind** creates a multi-node cluster and you can set the version and number of nodes. +Enable and configure the built-in standalone Kubernetes cluster for testing container deployments. -Select **Show system containers (advanced)** to view internal containers when -using Docker commands. - -Select **Reset Kubernetes cluster** to delete all stacks and Kubernetes resources. +| Setting | Description | | ------------------- | ----------------------------------------- | +| **Enable Kubernetes** | Install and run a standalone Kubernetes server as a Docker container for testing deployments. | +| **Cluster provisioning method** | Choose either **Kubeadm**, a single-node cluster with the version set by Docker Desktop, or **Kind**, a multi-node cluster where you can set the version and number of nodes.
| +| **Show system containers (advanced)** | Show internal containers when using Docker commands. | +| **Reset Kubernetes cluster** | Delete all stacks and Kubernetes resources. | For more information about using the Kubernetes integration with Docker Desktop, -see [Deploy on Kubernetes](/manuals/desktop/features/kubernetes.md). - -## Software Updates - -The **Software Updates** tab notifies you of any updates available to Docker Desktop. -When there's a new update, you can choose to download the update right away, or -select the **Release Notes** option to learn what's included in the updated version. +see [Explore the Kubernetes view](/manuals/desktop/use-desktop/kubernetes.md). -Turn off the check for updates by clearing the **Automatically check for updates** -check box. This disables notifications in the Docker menu and the notification -badge that appears on the Docker Desktop Dashboard. To check for updates manually, select -the **Check for updates** option in the Docker menu. +## Software updates -To allow Docker Desktop to automatically download new updates in the background, -select **Always download updates**. This downloads newer versions of Docker Desktop -when an update becomes available. After downloading the update, select -**Apply and Restart** to install the update. You can do this either through the -Docker menu or in the **Updates** section in the Docker Desktop Dashboard. +Manage how and when Docker Desktop checks for and downloads updates. -> [!TIP] -> -> With Docker Desktop version 4.38 and later, components of Docker Desktop, such as Docker Compose, Docker Scout, and the Docker CLI, can be updated independently without the need for a full restart. This feature is still in Beta. +| Setting | Description | Default | +| ------------------- | ----------------------------------------- | ------------------------------------- | +| **Automatically check for updates** | Notifies you of available updates in the Docker menu and Dashboard footer. | Enabled | +| **Always download updates** | Automatically download new versions of Docker Desktop in the background. | Disabled | +| **Automatically update components** | Update Docker Desktop components (such as Docker Compose, Docker Scout, and the Docker CLI) independently, without a full restart. | Enabled | ## Extensions -Use the **Extensions** tab to: - -- **Enable Docker Extensions** -- **Allow only extensions distributed through the Docker Marketplace** -- **Show Docker Extensions system containers** - -For more information about Docker extensions, see [Extensions](/manuals/extensions/_index.md). +Enable Docker Extensions and control which extensions are available to install and run. -## Features in development +| Setting | Description | +| ------------------- | ----------------------------------------- | +| **Enable Docker Extensions** | Turn Docker Extensions on or off. | +| **Allow only extensions distributed through the Docker Marketplace** | Restrict extensions to Marketplace-approved sources only. | +| **Show Docker Extensions system containers** | Show containers used by Docker Extensions. | -On the **Feature control** tab you can control your settings for **Beta features** and **Experimental features**. +For more information about Docker extensions, see [Docker Extensions](/manuals/extensions/_index.md). -You can also sign up to the [Developer Preview program](https://www.docker.com/community/get-involved/developer-preview/) from the **Features in development** tab. 
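+Extensions can also be inspected from the command line with the `docker extension` CLI plugin that ships with Docker Desktop. A quick sketch, assuming Docker Extensions are enabled:
+
+```console
+$ docker extension ls
+```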
- -### Beta features +## Beta features Beta features provide access to future product functionality. These features are intended for testing and feedback only as they may change @@ -484,47 +301,42 @@ between releases without warning or remove them entirely from a future release. Beta features must not be used in production environments. Docker doesn't offer support for beta features. -### Experimental features - -Experimental features provide early access to future product functionality. -These features are intended for testing and feedback only as they may change -between releases without warning or can be removed entirely from a future -release. Experimental features must not be used in production environments. -Docker does not offer support for experimental features. +You can also sign up to the [Developer Preview program](https://www.docker.com/community/get-involved/developer-preview/) from the **Beta features** tab. For a list of current experimental features in the Docker CLI, see [Docker CLI Experimental features](https://github.com/docker/cli/blob/master/experimental/README.md). ## Notifications -Use the **Notifications** tab to turn on or turn off notifications for the following events: - -- **Status updates on tasks and processes** -- **Recommendations from Docker** -- **Docker announcements** -- **Docker surveys** - -By default, all general notifications are turned on. You'll always receive error notifications and notifications about new Docker Desktop releases and updates. +Choose which types of Docker Desktop notifications you want to receive. -You can also [configure notification settings for Docker Scout-related issues](/manuals/scout/explore/dashboard.md#notification-settings). +| Notification type | Default| +| ----------------- | ------ | +| Status updates on tasks and processes | Enabled | +| Recommendations from Docker | Enabled | +| Docker announcements | Enabled | +| Docker surveys | Enabled | +| Error notifications | Always Enabled (cannot be changed) | +| New releases | Always Enabled (cannot be changed) | -Notifications momentarily appear in the lower-right of the Docker Desktop Dashboard and then move to the **Notifications** drawer which can be accessed from the top-right of the Docker Desktop Dashboard. +Notifications appear briefly in the lower-right of the Docker Desktop Dashboard, then move to the **Notifications** drawer, accessible from the top-right of the Dashboard. -## Advanced +## Advanced (Mac only) -On Mac, you can reconfigure your initial installation settings on the **Advanced** tab: +Reconfigure CLI tool installation paths and privileged system permissions set during initial install. -- **Choose how to configure the installation of Docker's CLI tools**. - - **System**: Docker CLI tools are installed in the system directory under `/usr/local/bin` - - **User**: Docker CLI tools are installed in the user directory under `$HOME/.docker/bin`. You must then add `$HOME/.docker/bin` to your PATH. To add `$HOME/.docker/bin` to your path: - 1. Open your shell configuration file. This is `~/.bashrc` if you're using a bash shell, or `~/.zshrc` if you're using a zsh shell. - 2. Copy and paste the following: - ```console - $ export PATH=$PATH:~/.docker/bin - ``` - 3. Save and the close the file. Restart your shell to apply the changes to the PATH variable. 
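+If you choose the **User** location for the CLI tools (see the table below), `$HOME/.docker/bin` must be on your `PATH`. A minimal sketch for zsh; use `~/.bashrc` instead if you use bash:
+
+```console
+$ echo 'export PATH="$PATH:$HOME/.docker/bin"' >> ~/.zshrc
+$ source ~/.zshrc
+```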
+| Setting | Description | Notes | +| ------------------- | ----------------------------------------- | ------------------------------------- | +| CLI tools installation — **System** | Install Docker CLI tools to `/usr/local/bin`. | | +| CLI tools installation — **User** | Install Docker CLI tools to `$HOME/.docker/bin` | Add `$HOME/.docker/bin` to your PATH by appending `export PATH=$PATH:~/.docker/bin` to `~/.bashrc` or `~/.zshrc`, then restart your shell. | +| **Allow the default Docker socket to be used** | Creates `/var/run/docker.sock` which some third party clients may use to communicate with Docker Desktop. For more information, see [permission requirements for macOS](/manuals/desktop/setup/install/mac-permission-requirements.md#installing-symlinks). | Requires password | +| **Allow privileged port mapping** | Starts the privileged helper process which binds the ports that are between 1 and 1024. For more information, see [permission requirements for macOS](/manuals/desktop/setup/install/mac-permission-requirements.md#binding-privileged-ports). | Requires password | -- **Allow the default Docker socket to be used (Requires password)**. Creates `/var/run/docker.sock` which some third party clients may use to communicate with Docker Desktop. For more information, see [permission requirements for macOS](/manuals/desktop/setup/install/mac-permission-requirements.md#installing-symlinks). +## Docker Offload -- **Allow privileged port mapping (Requires password)**. Starts the privileged helper process which binds the ports that are between 1 and 1024. For more information, see [permission requirements for macOS](/manuals/desktop/setup/install/mac-permission-requirements.md#binding-privileged-ports). +Enable Docker Offload and configure idle timeout and GPU support for cloud-based workloads. -For more information on each configuration and use case, see [Permission requirements](/manuals/desktop/setup/install/mac-permission-requirements.md). +| Setting | Description | Notes | +| ------------------- | ----------------------------------------- | ------------------------------------- | +| **Enable Docker Offload** | Run your containers in the cloud. | Requires sign-in and an Offload subscription | +| **Idle timeout** | Set the duration of time between no activity and Docker Offload entering idle mode. For details about idle timeout, see [Active and idle states](../../offload/configuration.md#understand-active-and-idle-states). | | +| **Enable GPU support** | Let your workloads use cloud GPU if available. 
| | diff --git a/content/manuals/desktop/setup/allow-list.md b/content/manuals/desktop/setup/allow-list.md index 3858c242f36..aa3f8381b16 100644 --- a/content/manuals/desktop/setup/allow-list.md +++ b/content/manuals/desktop/setup/allow-list.md @@ -17,13 +17,12 @@ This page contains the domain URLs that you need to add to a firewall allowlist | Domains | Description | | ------------------------------------------------------------------------------------ | -------------------------------------------- | -| https://api.segment.io | Analytics | -| https://cdn.segment.com | Analytics | | https://notify.bugsnag.com | Error reports | | https://sessions.bugsnag.com | Error reports | | https://auth.docker.io | Authentication | | https://cdn.auth0.com | Authentication | | https://login.docker.com | Authentication | +| https://auth.docker.com | Authentication | | https://desktop.docker.com | Update | | https://hub.docker.com | Docker Hub | | https://registry-1.docker.io | Docker Pull/Push | @@ -32,3 +31,5 @@ This page contains the domain URLs that you need to add to a firewall allowlist | https://docker-pinata-support.s3.amazonaws.com | Troubleshooting | | https://api.dso.docker.com | Docker Scout service | | https://api.docker.com | New API | +| https://dhi.io | Docker Hardened Images | +| https://registry.scout.docker.com | Docker Scout registry for DHI attestations | diff --git a/content/manuals/desktop/setup/install/_index.md b/content/manuals/desktop/setup/install/_index.md index 44d7b98a8cc..186d97b05dd 100644 --- a/content/manuals/desktop/setup/install/_index.md +++ b/content/manuals/desktop/setup/install/_index.md @@ -3,7 +3,4 @@ build: render: never title: Install weight: 10 -aliases: -- /desktop/install/ -- /desktop/setup/install/ --- diff --git a/content/manuals/desktop/setup/install/enterprise-deployment/_index.md b/content/manuals/desktop/setup/install/enterprise-deployment/_index.md deleted file mode 100644 index 588ccbcae81..00000000000 --- a/content/manuals/desktop/setup/install/enterprise-deployment/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Enterprise deployment -weight: 50 -description: If you're an IT admin, learn how to deploy Docker Desktop at scale -keywords: msi, docker desktop, windows, installation, mac, pkg, enterprise -build: - render: never ---- \ No newline at end of file diff --git a/content/manuals/desktop/setup/install/enterprise-deployment/faq.md b/content/manuals/desktop/setup/install/enterprise-deployment/faq.md deleted file mode 100644 index d2b7cacbcb8..00000000000 --- a/content/manuals/desktop/setup/install/enterprise-deployment/faq.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Enterprise deployment FAQs -linkTitle: FAQs -description: Frequently asked questions for deploying Docker Desktop at scale -keywords: msi, deploy, docker desktop, faqs, pkg, mdm, jamf, intune, windows, mac, enterprise, admin -tags: [FAQ, admin] -aliases: -- /desktop/install/msi/faq/ -- /desktop/setup/install/msi/faq/ ---- - -## MSI - -Common questions about installing Docker Desktop using the MSI installer. - -### What happens to user data if they have an older Docker Desktop installation (i.e. `.exe`)? - -Users must [uninstall](/manuals/desktop/uninstall.md) older `.exe` installations before using the new MSI version. This deletes all Docker containers, images, volumes, and other Docker-related data local to the machine, and removes the files generated by Docker Desktop. 
- -To preserve existing data before uninstalling, users should [backup](/manuals/desktop/settings-and-maintenance/backup-and-restore.md) their containers and volumes. - -For Docker Desktop 4.30 and later, the `.exe` installer includes a `-keep-data` flag that removes Docker Desktop while preserving underlying resources such as the container VMs: - -```powershell -& 'C:\Program Files\Docker\Docker\Docker Desktop Installer.exe' uninstall -keep-data -``` - -### What happens if the user's machine has an older `.exe` installation? - -The MSI installer detects older `.exe` installations and blocks the installation until the previous version is uninstalled. It prompts the user to uninstall their current/old version first, before retrying to install the MSI version. - -### My installation failed, how do I find out what happened? - -MSI installations may fail silently, offering little diagnostic feedback. - -To debug a failed installation, run the install again with verbose logging enabled: - -```powershell -msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" -``` - -After the installation has failed, open the log file and search for occurrences of `value 3`. This is the exit code Windows Installer outputs when it has failed. Just above the line, you will find the reason for the failure. - -### Why does the installer prompt for a reboot at the end of every fresh installation? - -The installer prompts for a reboot because it assumes that changes have been made to the system that require a reboot to finish their configuration. - -For example, if you select the WSL engine, the installer adds the required Windows features. After these features are installed, the system reboots to complete configurations so the WSL engine is functional. - -You can suppress reboots by using the `/norestart` option when launching the installer from the command line: - -```powershell -msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" /norestart -``` - -### Why isn't the `docker-users` group populated when the MSI is installed with Intune or another MDM solution? - -It's common for MDM solutions to install applications in the context of the system account. This means that the `docker-users` group isn't populated with the user's account, as the system account doesn't have access to the user's context. - -As an example, you can reproduce this by running the installer with `psexec` in an elevated command prompt: - -```powershell -psexec -i -s msiexec /i "DockerDesktop.msi" -``` -The installation should complete successfully, but the `docker-users` group won't be populated. - -As a workaround, you can create a script that runs in the context of the user account. - -The script would be responsible for creating the `docker-users` group and populating it with the correct user. - -Here's an example script that creates the `docker-users` group and adds the current user to it (requirements may vary depending on environment): - -```powershell -$Group = "docker-users" -$CurrentUser = [System.Security.Principal.WindowsIdentity]::GetCurrent().Name - -# Create the group -New-LocalGroup -Name $Group - -# Add the user to the group -Add-LocalGroupMember -Group $Group -Member $CurrentUser -``` - -> [!NOTE] -> -> After adding a new user to the `docker-users` group, the user must sign out and then sign back in for the changes to take effect. - -## MDM - -Common questions about deploying Docker Desktop using mobile device management -(MDM) tools such as Jamf, Intune, or Workspace ONE. 
- -### Why doesn't my MDM tool apply all Docker Desktop configuration settings at once? - -Some MDM tools, such as Workspace ONE, may not support applying multiple -configuration settings in a single XML file. In these cases, you may need to -deploy each setting in a separate XML file. - -Refer to your MDM provider's documentation for specific deployment -requirements or limitations. \ No newline at end of file diff --git a/content/manuals/desktop/setup/install/enterprise-deployment/msi-install-and-configure.md b/content/manuals/desktop/setup/install/enterprise-deployment/msi-install-and-configure.md deleted file mode 100644 index 48750fc0845..00000000000 --- a/content/manuals/desktop/setup/install/enterprise-deployment/msi-install-and-configure.md +++ /dev/null @@ -1,246 +0,0 @@ ---- -title: MSI installer -description: Understand how to use the MSI installer. Also explore additional configuration options. -keywords: msi, windows, docker desktop, install, deploy, configure, admin, mdm -tags: [admin] -weight: 10 -aliases: -- /desktop/install/msi/install-and-configure/ -- /desktop/setup/install/msi/install-and-configure/ -- /desktop/install/msi/ -- /desktop/setup/install/msi/ ---- - -{{< summary-bar feature_name="MSI installer" >}} - -The MSI package supports various MDM (Mobile Device Management) solutions, making it ideal for bulk installations and eliminating the need for manual setups by individual users. With this package, IT administrators can ensure standardized, policy-driven installations of Docker Desktop, enhancing efficiency and software management across their organizations. - -## Install interactively - -1. In the [Docker Admin Console](http://admin.docker.com/), navigate to your organization. -2. Under **Docker Desktop**, select the **Deploy** page. -3. From the **Windows OS** tab, select the **Download MSI installer** button. -4. Once downloaded, double-click `Docker Desktop Installer.msi` to run the installer. -5. After accepting the license agreement, choose the install location. By default, Docker Desktop is installed at `C:\Program Files\Docker\Docker`. -6. Configure the Docker Desktop installation. You can: - - - Create a desktop shortcut - - - Set the Docker Desktop service startup type to automatic - - - Disable Windows Container usage - - - Select the Docker Desktop backend: WSL or Hyper-V. If only one is supported by your system, you won't be able to choose. -7. Follow the instructions on the installation wizard to authorize the installer and proceed with the install. -8. When the installation is successful, select **Finish** to complete the installation process. - -If your administrator account is different from your user account, you must add the user to the **docker-users** group: -1. Run **Computer Management** as an **administrator**. -2. Navigate to **Local Users and Groups** > **Groups** > **docker-users**. -3. Right-click to add the user to the group. -4. Sign out and sign back in for the changes to take effect. - -> [!NOTE] -> -> When installing Docker Desktop with the MSI, in-app updates are automatically disabled. This ensures organizations can maintain version consistency and prevent unapproved updates. For Docker Desktop installed with the .exe installer, in-app updates remain supported. -> -> Docker Desktop notifies you when an update is available. To update Docker Desktop, download the latest installer from the Docker Admin Console. Navigate to the **Deploy** page > under **Docker Desktop**. 
-> -> To keep up to date with new releases, check the [release notes](/manuals/desktop/release-notes.md) page. - -## Install from the command line - -This section covers command line installations of Docker Desktop using PowerShell. It provides common installation commands that you can run. You can also add additional arguments which are outlined in [configuration options](#configuration-options). - -When installing Docker Desktop, you can choose between interactive or non-interactive installations. - -Interactive installations, without specifying `/quiet` or `/qn`, display the user interface and let you select your own properties. - -When installing via the user interface it's possible to: - -- Choose the destination folder -- Create a desktop shortcut -- Configure the Docker Desktop service startup type -- Disable Windows Containers -- Choose between the WSL or Hyper-V engine - -Non-interactive installations are silent and any additional configuration must be passed as arguments. - -### Common installation commands - -> [!IMPORTANT] -> -> Admin rights are required to run any of the following commands. - -#### Installing interactively with verbose logging - -```powershell -msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" -``` - -#### Installing interactively without verbose logging - -```powershell -msiexec /i "DockerDesktop.msi" -``` - -#### Installing non-interactively with verbose logging - -```powershell -msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" /quiet -``` - -#### Installing non-interactively and suppressing reboots - -```powershell -msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" /quiet /norestart -``` - -#### Installing non-interactively with admin settings - -```powershell -msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" /quiet /norestart ADMINSETTINGS="{"configurationFileVersion":2,"enhancedContainerIsolation":{"value":true,"locked":false}}" ALLOWEDORG="docker" -``` - -#### Installing with the passive display option - -You can use the `/passive` display option instead of `/quiet` when you want to perform a non-interactive installation but show a progress dialog. - -In passive mode the installer doesn't display any prompts or error messages to the user and the installation cannot be cancelled. - -For example: - -```powershell -msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" /passive /norestart -``` - -> [!TIP] -> -> When creating a value that expects a JSON string: -> -> - The property expects a JSON formatted string -> - The string should be wrapped in double quotes -> - The string shouldn't contain any whitespace -> - Property names are expected to be in double quotes - -### Common uninstall commands - -When uninstalling Docker Desktop, you need to use the same `.msi` file that was originally used to install the application. - -If you no longer have the original `.msi` file, you need to use the product code associated with the installation. To find the product code, run: - -```powershell -Get-WmiObject Win32_Product | Select-Object IdentifyingNumber, Name | Where-Object {$_.Name -eq "Docker Desktop"} -``` - -It should return output similar to the following: - -```text -IdentifyingNumber Name ------------------ ---- -{10FC87E2-9145-4D7D-B493-2E99E8D8E103} Docker Desktop -``` -> [!NOTE] -> -> This command may take some time, depending on the number of installed applications. - -`IdentifyingNumber` is the applications product code and can be used to uninstall Docker Desktop. 
For example: - -```powershell -msiexec /x {10FC87E2-9145-4D7D-B493-2E99E8D8E103} /L*V ".\msi.log" /quiet -``` - -#### Uninstalling interactively with verbose logging - -```powershell -msiexec /x "DockerDesktop.msi" /L*V ".\msi.log" -``` - -#### Uninstalling interactively without verbose logging - -```powershell -msiexec /x "DockerDesktop.msi" -``` - -#### Uninstalling non-interactively with verbose logging - -```powershell -msiexec /x "DockerDesktop.msi" /L*V ".\msi.log" /quiet -``` - -#### Uninstalling non-interactively without verbose logging - -```powershell -msiexec /x "DockerDesktop.msi" /quiet -``` - -### Configuration options - -> [!IMPORTANT] -> -> In addition to the following custom properties, the Docker Desktop MSI installer also supports the standard [Windows Installer command line options](https://learn.microsoft.com/en-us/windows/win32/msi/standard-installer-command-line-options). - -| Property | Description | Default | -| :--- | :--- | :--- | -| `ENABLEDESKTOPSHORTCUT` | Creates a desktop shortcut. | 1 | -| `INSTALLFOLDER` | Specifies a custom location where Docker Desktop will be installed. | C:\Program Files\Docker | -| `ADMINSETTINGS` | Automatically creates an `admin-settings.json` file which is used to [control certain Docker Desktop settings](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md) on client machines within organizations. It must be used together with the `ALLOWEDORG` property. | None | -| `ALLOWEDORG` | Requires the user to sign in and be part of the specified Docker Hub organization when running the application. This creates a registry key called `allowedOrgs` in `HKLM\Software\Policies\Docker\Docker Desktop`. | None | -| `ALWAYSRUNSERVICE` | Lets users switch to Windows containers without needing admin rights | 0 | -| `DISABLEWINDOWSCONTAINERS` | Disables the Windows containers integration | 0 | -| `ENGINE` | Sets the Docker Engine that's used to run containers. This can be either `wsl` , `hyperv`, or `windows` | `wsl` | -| `PROXYENABLEKERBEROSNTLM` | When set to 1, enables support for Kerberos and NTLM proxy authentication. Available with Docker Desktop 4.33 and later| 0 | -| `PROXYHTTPMODE` | Sets the HTTP Proxy mode. This can be either `system` or `manual` | `system` | -| `OVERRIDEPROXYHTTP` | Sets the URL of the HTTP proxy that must be used for outgoing HTTP requests. | None | -| `OVERRIDEPROXYHTTPS` | Sets the URL of the HTTP proxy that must be used for outgoing HTTPS requests. | None | -| `OVERRIDEPROXYEXCLUDE` | Bypasses proxy settings for the hosts and domains. Uses a comma-separated list. | None | -| `HYPERVDEFAULTDATAROOT` | Specifies the default location for the Hyper-V VM disk. | None | -| `WINDOWSCONTAINERSDEFAULTDATAROOT` | Specifies the default location for Windows containers. | None | -| `WSLDEFAULTDATAROOT` | Specifies the default location for the WSL distribution disk. | None | -| `DISABLEANALYTICS` | When set to 1, analytics collection will be disabled for the MSI. For more information, see [Analytics](#analytics). | 0 | - - -Additionally, you can also use `/norestart` or `/forcerestart` to control reboot behaviour. - -By default, the installer reboots the machine after a successful installation. When run silently, the reboot is automatic and the user is not prompted. - -## Analytics - -The MSI installer collects anonymous usage statistics relating to installation only. 
This is to better understand user behaviour and to improve the user experience by identifying and addressing issues or optimizing popular features. - -### How to opt-out - -{{< tabs >}} -{{< tab name="From the GUI" >}} - -When you install Docker Desktop from the default installer GUI, select the **Disable analytics** checkbox located on the bottom-left corner of the **Welcome** dialog. - -{{< /tab >}} -{{< tab name="From the command line" >}} - -When you install Docker Desktop from the command line, use the `DISABLEANALYTICS` property. - -```powershell -msiexec /i "win\msi\bin\en-US\DockerDesktop.msi" /L*V ".\msi.log" DISABLEANALYTICS=1 -``` - -{{< /tab >}} -{{< /tabs >}} - -### Persistence - -If you decide to disable analytics for an installation, your choice is persisted in the registry and honoured across future upgrades and uninstalls. - -However, the key is removed when Docker Desktop is uninstalled and must be configured again via one of the previous methods. - -The registry key is as follows: - -```powershell -SOFTWARE\Docker Inc.\Docker Desktop\DisableMsiAnalytics -``` - -When analytics is disabled, this key is set to `1`. - -## Additional resources - -- [Explore the FAQs](faq.md) diff --git a/content/manuals/desktop/setup/install/linux/_index.md b/content/manuals/desktop/setup/install/linux/_index.md index f034da1a6e1..6440f2a6a94 100644 --- a/content/manuals/desktop/setup/install/linux/_index.md +++ b/content/manuals/desktop/setup/install/linux/_index.md @@ -1,41 +1,43 @@ --- -description: Install Docker on Linux with ease using our step-by-step installation +description: + Install Docker on Linux with ease using our step-by-step installation guide covering system requirements, supported platforms, and where to go next. -keywords: linux, docker linux install, docker linux, linux docker installation, docker +keywords: + linux, docker linux install, docker linux, linux docker installation, docker for linux, docker desktop for linux, installing docker on linux, docker download linux, how to install docker on linux, linux vs docker engine, switch docker contexts title: Install Docker Desktop on Linux linkTitle: Linux weight: 60 aliases: -- /desktop/linux/install/ -- /desktop/install/linux-install/ -- /desktop/install/linux/ + - /desktop/linux/install/ + - /desktop/install/linux-install/ + - /desktop/install/linux/ --- > **Docker Desktop terms** > > Commercial use of Docker Desktop in larger enterprises (more than 250 > employees or more than $10 million USD in annual revenue) requires a [paid -> subscription](https://www.docker.com/pricing/). +> subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopLinuxInstall). This page contains information about general system requirements, supported platforms, and instructions on how to install Docker Desktop for Linux. > [!IMPORTANT] > ->Docker Desktop on Linux runs a Virtual Machine (VM) which creates and uses a custom docker context, `desktop-linux`, on startup. +> Docker Desktop on Linux runs a Virtual Machine (VM) which creates and uses a custom docker context, `desktop-linux`, on startup. > ->This means images and containers deployed on the Linux Docker Engine (before installation) are not available in Docker Desktop for Linux. +> This means images and containers deployed on the Linux Docker Engine (before installation) are not available in Docker Desktop for Linux. > > {{< accordion title=" Docker Desktop vs Docker Engine: What's the difference?" 
>}} > [!IMPORTANT] > -> For commercial use of Docker Engine obtained via Docker Desktop within larger enterprises (exceeding 250 employees or with annual revenue surpassing $10 million USD), a [paid subscription](https://www.docker.com/pricing/) is required. +> For commercial use of Docker Engine obtained via Docker Desktop within larger enterprises (exceeding 250 employees or with annual revenue surpassing $10 million USD), a [paid subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopLinuxInstall) is required. Docker Desktop for Linux provides a user-friendly graphical interface that simplifies the management of containers and services. It includes Docker Engine as this is the core technology that powers Docker containers. Docker Desktop for Linux also comes with additional features like Docker Scout and Docker Extensions. -#### Installing Docker Desktop and Docker Engine +### Installing Docker Desktop and Docker Engine Docker Desktop for Linux and Docker Engine can be installed side-by-side on the same machine. Docker Desktop for Linux stores containers and images in an isolated @@ -88,7 +90,7 @@ machine. The current context is indicated with an asterisk (`*`). $ docker context ls NAME DESCRIPTION DOCKER ENDPOINT ... default * Current DOCKER_HOST based configuration unix:///var/run/docker.sock ... -desktop-linux unix:///home//.docker/desktop/docker.sock ... +desktop-linux unix:///home//.docker/desktop/docker.sock ... ``` If you have both Docker Desktop and Docker Engine installed on the same machine, @@ -101,14 +103,15 @@ $ docker context use default default Current context is now "default" ``` - + And use the `desktop-linux` context to interact with Docker Desktop: - + ```console $ docker context use desktop-linux desktop-linux Current context is now "desktop-linux" -``` +``` + Refer to the [Docker Context documentation](/manuals/engine/manage-resources/contexts.md) for more details. {{< /accordion >}} @@ -117,17 +120,16 @@ Refer to the [Docker Context documentation](/manuals/engine/manage-resources/con Docker provides `.deb` and `.rpm` packages for the following Linux distributions and architectures: -| Platform | x86_64 / amd64 | -|:------------------------|:-----------------------:| -| [Ubuntu](ubuntu.md) | ✅ | -| [Debian](debian.md) | ✅ | -| [Red Hat Enterprise Linux (RHEL)](rhel.md) | ✅ | -| [Fedora](fedora.md) | ✅ | - +| Platform | x86_64 / amd64 | +| :----------------------------------------- | :------------: | +| [Ubuntu](ubuntu.md) | ✅ | +| [Debian](debian.md) | ✅ | +| [Red Hat Enterprise Linux (RHEL)](rhel.md) | ✅ | +| [Fedora](fedora.md) | ✅ | An experimental package is available for [Arch](archlinux.md)-based distributions. Docker has not tested or verified the installation. -Docker supports Docker Desktop on the current LTS release of the aforementioned distributions and the most recent version. As new versions are made available, Docker stops supporting the oldest version and supports the newest version. +Docker supports Docker Desktop on the current and previous LTS releases of the aforementioned distributions, as well as the most recent version. ## General system requirements @@ -137,7 +139,7 @@ To install Docker Desktop successfully, your Linux host must meet the following - KVM virtualization support. Follow the [KVM virtualization support instructions](#kvm-virtualization-support) to check if the KVM kernel modules are enabled and how to provide access to the KVM device. - QEMU must be version 5.2 or later. 
We recommend upgrading to the latest version. - systemd init system. -- GNOME, KDE, or MATE desktop environment. +- GNOME, KDE, or MATE desktop environments are supported but others may work. - For many Linux distributions, the GNOME environment does not support tray icons. To add support for tray icons, you need to install a GNOME extension. For example, [AppIndicator](https://extensions.gnome.org/extension/615/appindicator-support/). - At least 4 GB of RAM. - Enable configuring ID mapping in user namespaces, see [File sharing](/manuals/desktop/troubleshoot-and-support/faqs/linuxfaqs.md#how-do-i-enable-file-sharing). Note that for Docker Desktop version 4.35 and later, this is not required anymore. @@ -151,7 +153,6 @@ Docker Desktop for Linux runs a Virtual Machine (VM). For more information on wh ### KVM virtualization support - Docker Desktop runs a VM that requires [KVM support](https://www.linux-kvm.org). The `kvm` module should load automatically if the host has virtualization support. To load the module manually, run: @@ -186,7 +187,6 @@ irqbypass 16384 1 kvm #### Set up KVM device user permissions - To check ownership of `/dev/kvm`, run : ```console @@ -201,13 +201,15 @@ $ sudo usermod -aG kvm $USER Sign out and sign back in so that your group membership is re-evaluated. -## Where to go next +## Using Docker SDKs with Docker Desktop -- Install Docker Desktop for Linux for your specific Linux distribution: - - [Install on Ubuntu](ubuntu.md) - - [Install on Debian](debian.md) - - [Install on Red Hat Enterprise Linux (RHEL)](rhel.md) - - [Install on Fedora](fedora.md) - - [Install on Arch](archlinux.md) +Docker Desktop for Linux uses a per-user socket instead of the system-wide `/var/run/docker.sock`. Docker SDKs and tools that connect directly to the Docker daemon need the `DOCKER_HOST` environment variable set to connect to Docker Desktop. For configuration details, see [How do I use Docker SDKs with Docker Desktop for Linux?](/manuals/desktop/troubleshoot-and-support/faqs/linuxfaqs.md#how-do-i-use-docker-sdks-with-docker-desktop-for-linux). +## Where to go next +- Install Docker Desktop for Linux for your specific Linux distribution: + - [Install on Ubuntu](ubuntu.md) + - [Install on Debian](debian.md) + - [Install on Red Hat Enterprise Linux (RHEL)](rhel.md) + - [Install on Fedora](fedora.md) + - [Install on Arch](archlinux.md) diff --git a/content/manuals/desktop/setup/install/linux/archlinux.md b/content/manuals/desktop/setup/install/linux/archlinux.md index 06ea014d235..6dc58a5c619 100644 --- a/content/manuals/desktop/setup/install/linux/archlinux.md +++ b/content/manuals/desktop/setup/install/linux/archlinux.md @@ -17,12 +17,10 @@ aliases: > > Commercial use of Docker Desktop in larger enterprises (more than 250 > employees OR more than $10 million USD in annual revenue) requires a [paid -> subscription](https://www.docker.com/pricing/). +> subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopArchlinuxInstall). This page contains information on how to install, launch and upgrade Docker Desktop on an Arch-based distribution. - - ## Prerequisites To install Docker Desktop successfully, you must meet the [general system requirements](_index.md#general-system-requirements). 
@@ -33,7 +31,7 @@ To install Docker Desktop successfully, you must meet the [general system requir ```console $ wget https://download.docker.com/linux/static/stable/x86_64/docker-{{% param "docker_ce_version" %}}.tgz -qO- | tar xvfz - docker/docker --strip-components=1 - $ mv ./docker /usr/local/bin + $ sudo cp -rp ./docker /usr/local/bin/ && rm -r ./docker ``` 2. Download the latest Arch package from the [Release notes](/manuals/desktop/release-notes.md). @@ -52,7 +50,7 @@ To install Docker Desktop successfully, you must meet the [general system requir ## Next steps -- Explore [Docker's subscriptions](https://www.docker.com/pricing/) to see what Docker can offer you. +- Explore [Docker's subscriptions](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopArchlinuxInstall) to see what Docker can offer you. - Take a look at the [Docker workshop](/get-started/workshop/_index.md) to learn how to build an image and run it as a containerized application. - [Explore Docker Desktop](/manuals/desktop/use-desktop/_index.md) and all its features. - [Troubleshooting](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md) describes common problems, workarounds, how to run and submit diagnostics, and submit issues. diff --git a/content/manuals/desktop/setup/install/linux/debian.md b/content/manuals/desktop/setup/install/linux/debian.md index 7938ea07b77..65479b97d4a 100644 --- a/content/manuals/desktop/setup/install/linux/debian.md +++ b/content/manuals/desktop/setup/install/linux/debian.md @@ -16,7 +16,7 @@ aliases: > > Commercial use of Docker Desktop in larger enterprises (more than 250 > employees OR more than $10 million USD in annual revenue) requires a [paid -> subscription](https://www.docker.com/pricing/). +> subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopDebianInstall). This page contains information on how to install, launch, and upgrade Docker Desktop on a Debian distribution. @@ -60,7 +60,7 @@ Recommended approach to install Docker Desktop on Debian: By default, Docker Desktop is installed at `/opt/docker-desktop`. -The RPM package includes a post-install script that completes additional setup steps automatically. +The DEB package includes a post-install script that completes additional setup steps automatically. The post-install script: @@ -84,7 +84,7 @@ $ sudo apt-get install ./docker-desktop-amd64.deb ## Next steps -- Explore [Docker's subscriptions](https://www.docker.com/pricing/) to see what Docker can offer you. +- Explore [Docker's subscriptions](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopDebianInstall) to see what Docker can offer you. - Take a look at the [Docker workshop](/get-started/workshop/_index.md) to learn how to build an image and run it as a containerized application. - [Explore Docker Desktop](/manuals/desktop/use-desktop/_index.md) and all its features. - [Troubleshooting](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md) describes common problems, workarounds, how to run and submit diagnostics, and submit issues. 
diff --git a/content/manuals/desktop/setup/install/linux/fedora.md b/content/manuals/desktop/setup/install/linux/fedora.md index 4148069837f..20f5ee348d5 100644 --- a/content/manuals/desktop/setup/install/linux/fedora.md +++ b/content/manuals/desktop/setup/install/linux/fedora.md @@ -16,7 +16,7 @@ aliases: > > Commercial use of Docker Desktop in larger enterprises (more than 250 > employees OR more than $10 million USD in annual revenue) requires a [paid -> subscription](https://www.docker.com/pricing/). +> subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopFedoraInstall). This page contains information on how to install, launch and upgrade Docker Desktop on a Fedora distribution. @@ -25,7 +25,7 @@ This page contains information on how to install, launch and upgrade Docker Desk To install Docker Desktop successfully, you must: - Meet the [general system requirements](_index.md#general-system-requirements). -- Have a 64-bit version of Fedora 40 or Fedora 41. +- Have a 64-bit version of Fedora 42 or Fedora 43. - For a GNOME desktop environment you must install AppIndicator and KStatusNotifierItem [GNOME extensions](https://extensions.gnome.org/extension/615/appindicator-support/). - If you're not using GNOME, you must install `gnome-terminal` to enable terminal access from Docker Desktop: @@ -74,7 +74,7 @@ $ sudo dnf install ./docker-desktop-x86_64.rpm ## Next steps -- Explore [Docker's subscriptions](https://www.docker.com/pricing/) to see what Docker can offer you. +- Explore [Docker's subscriptions](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopFedoraInstall) to see what Docker can offer you. - Take a look at the [Docker workshop](/get-started/workshop/_index.md) to learn how to build an image and run it as a containerized application. - [Explore Docker Desktop](/manuals/desktop/use-desktop/_index.md) and all its features. - [Troubleshooting](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md) describes common problems, workarounds, how to run and submit diagnostics, and submit issues. diff --git a/content/manuals/desktop/setup/install/linux/rhel.md b/content/manuals/desktop/setup/install/linux/rhel.md index 88f0ae7aad7..68e7e3ce586 100644 --- a/content/manuals/desktop/setup/install/linux/rhel.md +++ b/content/manuals/desktop/setup/install/linux/rhel.md @@ -14,7 +14,7 @@ aliases: > > Commercial use of Docker Desktop in larger enterprises (more than 250 > employees or more than $10 million USD in annual revenue) requires a [paid -> subscription](https://www.docker.com/pricing/). +> subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopRhelInstall). This page contains information on how to install, launch and upgrade Docker Desktop on a Red Hat Enterprise Linux (RHEL) distribution. @@ -23,24 +23,23 @@ This page contains information on how to install, launch and upgrade Docker Desk To install Docker Desktop successfully, you must: - Meet the [general system requirements](_index.md#general-system-requirements). -- Have a 64-bit version of either RHEL 8 or RHEL 9. -- Have a [Docker account](/manuals/accounts/create-account.md), as authentication is required for Docker Desktop on RHEL. +- Have a 64-bit version of either RHEL 9 or RHEL 10. - If `pass` is not installed, or it can't be installed, you must enable [CodeReady Linux Builder (CRB) repository](https://access.redhat.com/articles/4348511) and [Extra Packages for Enterprise Linux (EPEL)](https://docs.fedoraproject.org/en-US/epel/). 
{{< tabs group="os_version" >}} - {{< tab name="RHEL 9" >}} + {{< tab name="RHEL 10" >}} ```console - $ sudo subscription-manager repos --enable codeready-builder-for-rhel-9-$(arch)-rpms - $ sudo dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + $ sudo subscription-manager repos --enable codeready-builder-for-rhel-10-$(arch)-rpms + $ sudo dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-10.noarch.rpm $ sudo dnf install pass ``` {{< /tab >}} - {{< tab name="RHEL 8" >}} + {{< tab name="RHEL 9" >}} ```console - $ sudo subscription-manager repos --enable codeready-builder-for-rhel-8-$(arch)-rpms - $ sudo dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + $ sudo subscription-manager repos --enable codeready-builder-for-rhel-9-$(arch)-rpms + $ sudo dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm $ sudo dnf install pass ``` @@ -50,7 +49,7 @@ To install Docker Desktop successfully, you must: - For a GNOME desktop environment you must install AppIndicator and KStatusNotifierItem [GNOME extensions](https://extensions.gnome.org/extension/615/appindicator-support/). You must also enable EPEL. {{< tabs group="os_version" >}} - {{< tab name="RHEL 9" >}} + {{< tab name="RHEL 10" >}} ```console $ # enable EPEL as described above $ sudo dnf install gnome-shell-extension-appindicator @@ -58,12 +57,11 @@ To install Docker Desktop successfully, you must: ``` {{< /tab >}} - {{< tab name="RHEL 8" >}} + {{< tab name="RHEL 9" >}} ```console $ # enable EPEL as described above $ sudo dnf install gnome-shell-extension-appindicator - $ sudo dnf install gnome-shell-extension-desktop-icons - $ sudo gnome-shell-extension-tool -e appindicatorsupport@rgcjonas.gmail.com + $ sudo gnome-extensions enable appindicatorsupport@rgcjonas.gmail.com ``` {{< /tab >}} @@ -107,10 +105,6 @@ The post-install script: {{% include "desktop-linux-launch.md" %}} -> [!IMPORTANT] -> -> After launching Docker Desktop for RHEL, you must sign in to your Docker account to start using Docker Desktop. - > [!TIP] > > To attach Red Hat subscription data to containers, see [Red Hat verified solution](https://access.redhat.com/solutions/5870841). @@ -132,7 +126,7 @@ $ sudo dnf install ./docker-desktop--rhel.rpm ## Next steps -- Review [Docker's subscriptions](https://www.docker.com/pricing/) to see what Docker can offer you. +- Review [Docker's subscriptions](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopRhelInstall) to see what Docker can offer you. - Take a look at the [Docker workshop](/get-started/workshop/_index.md) to learn how to build an image and run it as a containerized application. - [Explore Docker Desktop](/manuals/desktop/use-desktop/_index.md) and all its features. - [Troubleshooting](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md) describes common problems, workarounds, how to run and submit diagnostics, and submit issues. diff --git a/content/manuals/desktop/setup/install/linux/ubuntu.md b/content/manuals/desktop/setup/install/linux/ubuntu.md index 282bb8d8184..fa8102b62fa 100644 --- a/content/manuals/desktop/setup/install/linux/ubuntu.md +++ b/content/manuals/desktop/setup/install/linux/ubuntu.md @@ -1,7 +1,9 @@ --- -description: Learn how to install, launch, and upgrade Docker Desktop on Ubuntu. This +description: + Learn how to install, launch, and upgrade Docker Desktop on Ubuntu. This quick guide will cover prerequisites, installation methods, and more. 
-keywords: install docker ubuntu, ubuntu install docker, install docker on ubuntu, +keywords: + install docker ubuntu, ubuntu install docker, install docker on ubuntu, docker install ubuntu, how to install docker on ubuntu, ubuntu docker install, docker installation on ubuntu, docker ubuntu install, docker installing ubuntu, installing docker on ubuntu, docker desktop for ubuntu @@ -10,16 +12,16 @@ linkTitle: Ubuntu weight: 10 toc_max: 4 aliases: -- /desktop/linux/install/ubuntu/ -- /desktop/install/ubuntu/ -- /desktop/install/linux/ubuntu/ + - /desktop/linux/install/ubuntu/ + - /desktop/install/ubuntu/ + - /desktop/install/linux/ubuntu/ --- > **Docker Desktop terms** > > Commercial use of Docker Desktop in larger enterprises (more than 250 > employees or more than $10 million USD in annual revenue) requires a [paid -> subscription](https://www.docker.com/pricing/). +> subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopUbuntuInstall). This page contains information on how to install, launch and upgrade Docker Desktop on an Ubuntu distribution. @@ -28,7 +30,7 @@ This page contains information on how to install, launch and upgrade Docker Desk To install Docker Desktop successfully, you must: - Meet the [general system requirements](_index.md#general-system-requirements). -- Have an x86-64 system with Ubuntu 22.04, 24.04, or the latest non-LTS version. +- Have an x86-64 system with Ubuntu 26.04 LTS or 24.04 LTS. - If you're not using GNOME, you must install `gnome-terminal` to enable terminal access from Docker Desktop: ```console $ sudo apt install gnome-terminal @@ -47,7 +49,7 @@ Recommended approach to install Docker Desktop on Ubuntu: ```console $ sudo apt-get update - $ sudo apt-get install ./docker-desktop-amd64.deb + $ sudo apt install ./docker-desktop-amd64.deb ``` > [!NOTE] @@ -68,7 +70,7 @@ The post-install script: - Sets the capability on the Docker Desktop binary to map privileged ports and set resource limits. - Adds a DNS name for Kubernetes to `/etc/hosts`. - Creates a symlink from `/usr/local/bin/com.docker.cli` to `/usr/bin/docker`. - This is because the classic Docker CLI is installed at `/usr/bin/docker`. The Docker Desktop installer also installs a Docker CLI binary that includes cloud-integration capabilities and is essentially a wrapper for the Compose CLI, at `/usr/local/bin/com.docker.cli`. The symlink ensures that the wrapper can access the classic Docker CLI. + This is because the classic Docker CLI is installed at `/usr/bin/docker`. The Docker Desktop installer also installs a Docker CLI binary that includes cloud-integration capabilities and is essentially a wrapper for the Compose CLI, at `/usr/local/bin/com.docker.cli`. The symlink ensures that the wrapper can access the classic Docker CLI. ## Launch Docker Desktop @@ -80,12 +82,12 @@ When a new version for Docker Desktop is released, the Docker UI shows a notific You need to download the new package each time you want to upgrade Docker Desktop and run: ```console -$ sudo apt-get install ./docker-desktop-amd64.deb +$ sudo apt install ./docker-desktop-amd64.deb ``` ## Next steps -- Review [Docker's subscriptions](https://www.docker.com/pricing/) to see what Docker can offer you. +- Review [Docker's subscriptions](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopUbuntuInstall) to see what Docker can offer you. - Follow the [Docker workshop](/get-started/workshop/_index.md) to learn how to build an image and run it as a containerized application. 
- [Explore Docker Desktop](/manuals/desktop/use-desktop/_index.md) and all its features. - [Troubleshooting](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md) describes common problems, workarounds, how to run and submit diagnostics, and submit issues. diff --git a/content/manuals/desktop/setup/install/mac-install.md b/content/manuals/desktop/setup/install/mac-install.md index 41940a29955..25a8db8a42a 100644 --- a/content/manuals/desktop/setup/install/mac-install.md +++ b/content/manuals/desktop/setup/install/mac-install.md @@ -16,13 +16,14 @@ aliases: - /docker-for-mac/apple-silicon/ - /desktop/mac/apple-silicon/ - /desktop/install/mac-install/ +- /desktop/install/mac/ --- > **Docker Desktop terms** > > Commercial use of Docker Desktop in larger enterprises (more than 250 > employees or more than $10 million USD in annual revenue) requires a [paid -> subscription](https://www.docker.com/pricing/). +> subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopMacInstall). This page provides download links, system requirements, and step-by-step installation instructions for Docker Desktop on Mac. @@ -31,14 +32,10 @@ This page provides download links, system requirements, and step-by-step install *For checksums, see [Release notes](/manuals/desktop/release-notes.md).* -> [!WARNING] -> -> If you're experiencing malware detection issues, follow the steps documented in [docker/for-mac#7527](https://github.com/docker/for-mac/issues/7527). - ## System requirements {{< tabs >}} -{{< tab name="Mac with Intel chip" >}} +{{< tab name="Mac with Apple silicon" >}} - A supported version of macOS. @@ -47,9 +44,13 @@ This page provides download links, system requirements, and step-by-step install > Docker Desktop is supported on the current and two previous major macOS releases. As new major versions of macOS are made generally available, Docker stops supporting the oldest version and supports the newest version of macOS (in addition to the previous two releases). - At least 4 GB of RAM. +- For the best experience, it's recommended that you install Rosetta 2. Rosetta 2 is no longer strictly required, however there are a few optional command line tools that still require Rosetta 2 when using Darwin/AMD64. See [Known issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). To install Rosetta 2 manually from the command line, run the following command: + ```console + $ softwareupdate --install-rosetta + ``` {{< /tab >}} -{{< tab name="Mac with Apple silicon" >}} +{{< tab name="Mac with Intel chip" >}} - A supported version of macOS. @@ -58,14 +59,20 @@ This page provides download links, system requirements, and step-by-step install > Docker Desktop is supported on the current and two previous major macOS releases. As new major versions of macOS are made generally available, Docker stops supporting the oldest version and supports the newest version of macOS (in addition to the previous two releases). - At least 4 GB of RAM. -- For the best experience, it's recommended that you install Rosetta 2. Rosetta 2 is no longer strictly required, however there are a few optional command line tools that still require Rosetta 2 when using Darwin/AMD64. See [Known issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). 
To install Rosetta 2 manually from the command line, run the following command: - ```console - $ softwareupdate --install-rosetta - ``` {{< /tab >}} {{< /tabs >}} +> **Before you install or update** +> +> - Quit tools that might call Docker in the background (Visual Studio Code, terminals, agent apps). +> +> - If you manage fleets or install via MDM, use the [**PKG installer**](/manuals/enterprise/enterprise-deployment/pkg-install-and-configure.md). +> +> - Keep the installer volume mounted until the installation completes. +> +> If you encounter a "Docker.app is damaged" dialog, see [Fix "Docker.app is damaged" on macOS](/manuals/desktop/troubleshoot-and-support/troubleshoot/mac-damaged-dialog.md). + ## Install and run Docker Desktop on Mac > [!TIP] @@ -124,7 +131,7 @@ The `install` command accepts the following flags: - `--allowed-org=`: Requires the user to sign in and be part of the specified Docker Hub organization when running the application - `--user=`: Performs the privileged configurations once during installation. This removes the need for the user to grant root privileges on first run. For more information, see [Privileged helper permission requirements](/manuals/desktop/setup/install/mac-permission-requirements.md#permission-requirements). To find the username, enter `ls /Users` in the CLI. -- `--admin-settings`: Automatically creates an `admin-settings.json` file which is used by administrators to control certain Docker Desktop settings on client machines within their organization. For more information, see [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md). +- `--admin-settings`: Automatically creates an `admin-settings.json` file which is used by administrators to control certain Docker Desktop settings on client machines within their organization. For more information, see [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md). - It must be used together with the `--allowed-org=` flag. - For example: `--allowed-org= --admin-settings="{'configurationFileVersion': 2, 'enhancedContainerIsolation': {'value': true, 'locked': false}}"` @@ -134,10 +141,24 @@ The `install` command accepts the following flags: - `--override-proxy-http=`: Sets the URL of the HTTP proxy that must be used for outgoing HTTP requests. It requires `--proxy-http-mode` to be `manual`. - `--override-proxy-https=`: Sets the URL of the HTTP proxy that must be used for outgoing HTTPS requests, requires `--proxy-http-mode` to be `manual` - `--override-proxy-exclude=`: Bypasses proxy settings for the hosts and domains. It's a comma-separated list. +- `--override-proxy-pac=`: Sets the PAC file URL. This setting takes effect only when using `manual` proxy mode. +- `--override-proxy-embedded-pac=`: Specifies an embedded PAC (Proxy Auto-Config) script. This setting takes effect only when using `manual` proxy mode and has precedence over the `--override-proxy-pac` flag. 
+ +###### Example of specifying PAC file + +```console +$ sudo /Applications/Docker.app/Contents/MacOS/install --user testuser --proxy-http-mode="manual" --override-proxy-pac="http://localhost:8080/myproxy.pac" +``` + +###### Example of specifying PAC script + +```console +$ sudo /Applications/Docker.app/Contents/MacOS/install --user testuser --proxy-http-mode="manual" --override-proxy-embedded-pac="function FindProxyForURL(url, host) { return \"DIRECT\"; }" +``` > [!TIP] > -> As an IT administrator, you can use endpoint management (MDM) software to identify the number of Docker Desktop instances and their versions within your environment. This can provide accurate license reporting, help ensure your machines use the latest version of Docker Desktop, and enable you to [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md). +> As an IT administrator, you can use endpoint management (MDM) software to identify the number of Docker Desktop instances and their versions within your environment. This can provide accurate license reporting, help ensure your machines use the latest version of Docker Desktop, and enable you to [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md). > - [Intune](https://learn.microsoft.com/en-us/mem/intune/apps/app-discovered-apps) > - [Jamf](https://docs.jamf.com/10.25.0/jamf-pro/administrator-guide/Application_Usage.html) > - [Kandji](https://support.kandji.io/support/solutions/articles/72000559793-view-a-device-application-list) @@ -146,7 +167,7 @@ The `install` command accepts the following flags: ## Where to go next -- Explore [Docker's subscriptions](https://www.docker.com/pricing/) to see what Docker can offer you. +- Explore [Docker's subscriptions](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopMacInstall) to see what Docker can offer you. - [Get started with Docker](/get-started/introduction/_index.md). - [Explore Docker Desktop](/manuals/desktop/use-desktop/_index.md) and all its features. - [Troubleshooting](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md) describes common problems, workarounds, how diff --git a/content/manuals/desktop/setup/install/mac-permission-requirements.md b/content/manuals/desktop/setup/install/mac-permission-requirements.md index 564f4e5be98..20942720160 100644 --- a/content/manuals/desktop/setup/install/mac-permission-requirements.md +++ b/content/manuals/desktop/setup/install/mac-permission-requirements.md @@ -105,7 +105,7 @@ retain their original permissions. ## Enhanced Container Isolation In addition, Docker Desktop supports [Enhanced Container Isolation -mode](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md) (ECI), +mode](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) (ECI), available to Business customers only, which further secures containers without impacting developer workflows. diff --git a/content/manuals/desktop/setup/install/windows-install.md b/content/manuals/desktop/setup/install/windows-install.md index 1bb4e86bb2e..39104250433 100644 --- a/content/manuals/desktop/setup/install/windows-install.md +++ b/content/manuals/desktop/setup/install/windows-install.md @@ -26,7 +26,7 @@ aliases: > > Commercial use of Docker Desktop in larger enterprises (more than 250 > employees OR more than $10 million USD in annual revenue) requires a [paid -> subscription](https://www.docker.com/pricing/). 
+> subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopWindowsInstall). This page provides download links, system requirements, and step-by-step installation instructions for Docker Desktop on Windows. @@ -36,28 +36,45 @@ This page provides download links, system requirements, and step-by-step install _For checksums, see [Release notes](/manuals/desktop/release-notes.md)_ +## Installation modes + +Docker Desktop supports two installation modes. Per-user installation (Beta) is recommended for most users. It does not require administrator privileges to install or update, and the WSL 2 backend it uses covers the needs of the vast majority of Docker Desktop users. + +| | Per-user (recommended) | All users | +|---|---|---| +| Install location | `%LOCALAPPDATA%\Programs\DockerDesktop` | `C:\Program Files\Docker\Docker` | +| Registry keys | Current User (HKCU) | Local Machine (HKLM) | +| Admin rights to install | Not required | Required | +| Admin rights to update | Not required | Required | +| Linux containers backend | WSL 2 only | WSL 2 or Hyper-V | +| Windows containers | Not supported | Supported | +| Security | Smaller attack surface; no privileged system service installed | Requires privileged system service; broader access to host resources | + +For more information, see [Understand permission requirements for Windows](windows-install.md). + ## System requirements > [!TIP] > > **Should I use Hyper-V or WSL?** > -> Docker Desktop's functionality remains consistent on both WSL and Hyper-V, without a preference for either architecture. Hyper-V and WSL have their own advantages and disadvantages, depending on your specific setup and your planned use case. +> Docker Desktop's functionality remains consistent on both WSL and Hyper-V, without a preference for either architecture. Hyper-V and WSL have their own advantages and disadvantages, depending on your specific setup and your planned use case. Note that Hyper-V is only available with all-users installation. If you install Docker Desktop in per-user mode, WSL 2 is the only supported backend. {{< tabs >}} {{< tab name="WSL 2 backend, x86_64" >}} -- WSL version 1.1.3.0 or later. -- Windows 11 64-bit: Home or Pro version 22H2 or higher, or Enterprise or Education version 22H2 or higher. -- Windows 10 64-bit: Minimum required is Home or Pro 22H2 (build 19045) or higher, or Enterprise or Education 22H2 (build 19045) or higher. +- WSL version 2.1.5 or later. To check your version, see [WSL: Verification and setup](#wsl-verification-and-setup) +- If you intend to use Enhanced Container Isolation, ensure you’re using WSL version 2.6 or later. This is required because ECI depends on a Linux kernel version of at least 6.3.0, and WSL 2.6+ bundles Linux kernel version 6.6. +- Windows 10 64-bit: Enterprise, Pro, or Education version 22H2 (build 19045). +- Windows 11 64-bit: Enterprise, Pro, or Education version 23H2 (build 22631) or higher. - Turn on the WSL 2 feature on Windows. For detailed instructions, refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows/wsl/install-win10). - The following hardware prerequisites are required to successfully run WSL 2 on Windows 10 or Windows 11: - 64-bit processor with [Second Level Address Translation (SLAT)](https://en.wikipedia.org/wiki/Second_Level_Address_Translation) - - 4GB system RAM + - 8GB system RAM - Enable hardware virtualization in BIOS/UEFI. 
For more information, see - [Virtualization](/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md#virtualization). + [Virtualization](/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md#docker-desktop-fails-due-to-virtualization-not-working). For more information on setting up WSL 2 with Docker Desktop, see [WSL](/manuals/desktop/features/wsl/_index.md). @@ -67,20 +84,20 @@ For more information on setting up WSL 2 with Docker Desktop, see [WSL](/manuals > [!IMPORTANT] > -> To run Windows containers, you need Windows 10 or Windows 11 Professional or Enterprise edition. +> To run [Windows containers](#windows-containers), you need Windows 10 or Windows 11 Professional or Enterprise edition. > Windows Home or Education editions only allow you to run Linux containers. {{< /tab >}} {{< tab name="Hyper-V backend, x86_64" >}} -- Windows 11 64-bit: Enterprise, Pro, or Education version 22H2 or higher. -- Windows 10 64-bit: Enterprise, Pro, or Education version 22H2 (build 19045) or higher. +- Windows 10 64-bit: Enterprise, Pro, or Education version 22H2 (build 19045). +- Windows 11 64-bit: Enterprise, Pro, or Education version 23H2 (build 22631) or higher. - Turn on Hyper-V and Containers Windows features. - The following hardware prerequisites are required to successfully run Client Hyper-V on Windows 10: - 64 bit processor with [Second Level Address Translation (SLAT)](https://en.wikipedia.org/wiki/Second_Level_Address_Translation) - - 4GB system RAM + - 8GB system RAM - Turn on BIOS/UEFI-level hardware virtualization support in the BIOS/UEFI settings. For more information, see [Virtualization](/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md#virtualization). @@ -91,21 +108,21 @@ For more information on setting up WSL 2 with Docker Desktop, see [WSL](/manuals > [!IMPORTANT] > -> To run Windows containers, you need Windows 10 or Windows 11 Professional or Enterprise edition. +> To run [Windows containers](#windows-containers), you need Windows 10 or Windows 11 Professional or Enterprise edition. > Windows Home or Education editions only allow you to run Linux containers. {{< /tab >}} {{< tab name="WSL 2 backend, Arm (Early Access)" >}} -- WSL version 1.1.3.0 or later. -- Windows 11 64-bit: Home or Pro version 22H2 or higher, or Enterprise or Education version 22H2 or higher. -- Windows 10 64-bit: Minimum required is Home or Pro 22H2 (build 19045) or higher, or Enterprise or Education 22H2 (build 19045) or higher. +- WSL version 2.1.5 or later. To check your version, see [WSL: Verification and setup](#wsl-verification-and-setup) +- Windows 10 64-bit: Enterprise, Pro, or Education version 22H2 (build 19045). +- Windows 11 64-bit: Enterprise, Pro, or Education version 23H2 (build 22631) or higher. - Turn on the WSL 2 feature on Windows. For detailed instructions, refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows/wsl/install-win10). - The following hardware prerequisites are required to successfully run WSL 2 on Windows 10 or Windows 11: - 64-bit processor with [Second Level Address Translation (SLAT)](https://en.wikipedia.org/wiki/Second_Level_Address_Translation) - - 4GB system RAM + - 8GB system RAM - Enable hardware virtualization in BIOS/UEFI. For more information, see [Virtualization](/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md#virtualization). 
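For the Hyper-V backend requirements above, "Turn on Hyper-V and Containers Windows features" can be done from an elevated PowerShell session. A minimal sketch, assuming the standard Windows optional feature names; a restart is usually required afterwards, and remember that the Hyper-V backend is only available with an all-users installation.

```powershell
# Run from an elevated PowerShell prompt; reboot when prompted
Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V -All
Enable-WindowsOptionalFeature -Online -FeatureName Containers
```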
@@ -124,51 +141,17 @@ Running Docker Desktop inside a VMware ESXi or Azure VM is supported for Docker It requires enabling nested virtualization on the hypervisor first. For more information, see [Running Docker Desktop in a VM or VDI environment](/manuals/desktop/setup/vm-vdi.md). -{{< accordion title="How do I switch between Windows and Linux containers?" >}} - -From the Docker Desktop menu, you can toggle which daemon (Linux or Windows) -the Docker CLI talks to. Select **Switch to Windows containers** to use Windows -containers, or select **Switch to Linux containers** to use Linux containers -(the default). - -For more information on Windows containers, refer to the following documentation: - -- Microsoft documentation on [Windows containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/index). - -- [Build and Run Your First Windows Server Container (Blog Post)](https://www.docker.com/blog/build-your-first-docker-windows-server-container/) - gives a quick tour of how to build and run native Docker Windows containers on Windows 10 and Windows Server 2016 evaluation releases. - -- [Getting Started with Windows Containers (Lab)](https://github.com/docker/labs/blob/master/windows/windows-containers/README.md) - shows you how to use the [MusicStore](https://github.com/aspnet/MusicStore/) - application with Windows containers. The MusicStore is a standard .NET application and, - [forked here to use containers](https://github.com/friism/MusicStore), is a good example of a multi-container application. - -- To understand how to connect to Windows containers from the local host, see - [I want to connect to a container from Windows](/manuals/desktop/features/networking.md#i-want-to-connect-to-a-container-from-the-host) - -> [!NOTE] -> -> When you switch to Windows containers, **Settings** only shows those tabs that are active and apply to your Windows containers. - -If you set proxies or daemon configuration in Windows containers mode, these -apply only on Windows containers. If you switch back to Linux containers, -proxies and daemon configurations return to what you had set for Linux -containers. Your Windows container settings are retained and become available -again when you switch back. - -{{< /accordion >}} - ## Install Docker Desktop on Windows -> [!TIP] -> -> See the [FAQs](/manuals/desktop/troubleshoot-and-support/faqs/general.md#how-do-i-run-docker-desktop-without-administrator-privileges) on how to install and run Docker Desktop without needing administrator privileges. - ### Install interactively 1. Download the installer using the download button at the top of the page, or from the [release notes](/manuals/desktop/release-notes.md). -2. Double-click `Docker Desktop Installer.exe` to run the installer. By default, Docker Desktop is installed at `C:\Program Files\Docker\Docker`. +2. Double-click `Docker Desktop Installer.exe` to run the installer. The installer will ask which installation mode you prefer. Choosing per-user installs to `%LOCALAPPDATA%\Programs\DockerDesktop` and requires no administrator privileges. Choosing all users will prompt for elevation. + + > [!NOTE] + > + >If you want to switch installation mode at a later date, you need to uninstall and reinstall Docker Desktop. 3. When prompted, ensure the **Use WSL 2 instead of Hyper-V** option on the Configuration page is selected or not depending on your choice of backend. @@ -180,35 +163,112 @@ again when you switch back. 6. [Start Docker Desktop](#start-docker-desktop). 
-If your administrator account is different to your user account, you must add the user to the **docker-users** group: -1. Run **Computer Management** as an **administrator**. -2. Navigate to **Local Users and Groups** > **Groups** > **docker-users**. -3. Right-click to add the user to the group. -4. Sign out and sign back in for the changes to take effect. - ### Install from the command line -After downloading `Docker Desktop Installer.exe`, run the following command in a terminal to install Docker Desktop: +After downloading `Docker Desktop Installer.exe`, run the following command in a terminal to install Docker Desktop to `%LOCALAPPDATA%\Programs\DockerDesktop`. + +For per-user installation, run: ```console -$ "Docker Desktop Installer.exe" install +$ "Docker Desktop Installer.exe" install --user ``` -If you’re using PowerShell you should run it as: +To install for all users on the machine (requires administrator privileges): + +```console +$ "Docker Desktop Installer.exe" install +``` +If you're using PowerShell you should run it as: + ```powershell +# Per-user installation (no admin required) +Start-Process 'Docker Desktop Installer.exe' -Wait -ArgumentList 'install', '--user' + +# All-users installation (run as administrator) Start-Process 'Docker Desktop Installer.exe' -Wait install ``` If using the Windows Command Prompt: - + ```sh +# Per-user installation (no admin required) +start /w "" "Docker Desktop Installer.exe" install --user + +# All-users installation (run as administrator) start /w "" "Docker Desktop Installer.exe" install ``` -By default, Docker Desktop is installed at `C:\Program Files\Docker\Docker`. +If using all-users installation and your administrator account is different to your user account, you must add the user to the **docker-users** group to access features that require higher privileges, such as creating and managing the Hyper-V VM, or using Windows containers: -#### Installer flags +```console +$ net localgroup docker-users /add +``` + +See the [Installer flags](#installer-flags) section to see what flags the `install` command accepts. + +> [!NOTE] +> +>If you want to switch installation mode at a later date, you need to uninstall and reinstall Docker Desktop. + +## Start Docker Desktop + +Docker Desktop does not start automatically after installation. To start Docker Desktop: + +1. Search for Docker, and select **Docker Desktop** in the search results. + +2. The Docker menu ({{< inline-image src="images/whale-x.svg" alt="whale menu" >}}) displays the Docker Subscription Service Agreement. + + {{% include "desktop-license-update.md" %}} + +3. Select **Accept** to continue. Docker Desktop starts after you accept the terms. + + Note that Docker Desktop won't run if you do not agree to the terms. You can choose to accept the terms at a later date by opening Docker Desktop. + + For more information, see [Docker Desktop Subscription Service Agreement](https://www.docker.com/legal/docker-subscription-service-agreement/). It is recommended that you read the [FAQs](https://www.docker.com/pricing/faq). + +> [!TIP] +> +> As an IT administrator, you can use endpoint management (MDM) software to identify the number of Docker Desktop instances and their versions within your environment. This can provide accurate license reporting, help ensure your machines use the latest version of Docker Desktop, and enable you to [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md). 
+> - [Intune](https://learn.microsoft.com/en-us/mem/intune/apps/app-discovered-apps) +> - [Jamf](https://docs.jamf.com/10.25.0/jamf-pro/administrator-guide/Application_Usage.html) +> - [Kandji](https://support.kandji.io/support/solutions/articles/72000559793-view-a-device-application-list) +> - [Kolide](https://www.kolide.com/features/device-inventory/properties/mac-apps) +> - [Workspace One](https://blogs.vmware.com/euc/2022/11/how-to-use-workspace-one-intelligence-to-manage-app-licenses-and-reduce-costs.html) + + +## Advanced system configuration and installation options + +### WSL: Verification and setup + +If you have chosen to use WSL, first verify that your installed version meets system requirements by running the following command in your terminal: + +```console +wsl --version +``` + +If version details do not appear, you are likely using the inbox version of WSL. This version does not support modern capabilities and must be updated. + +You can update or install WSL using one of the following methods: + +#### Option 1: Install or update WSL via the terminal + +1. Open PowerShell or Windows Command Prompt in administrator mode. +2. Run either the install or update command. You may be prompted to restart your machine. For more information, refer to [Install WSL](https://learn.microsoft.com/en-us/windows/wsl/install). +```console +wsl --install + +wsl --update +``` + +#### Option 2: Install WSL via the MSI package + +If Microsoft Store access is blocked due to security policies: +1. Go to the official [WSL GitHub Releases page](https://github.com/microsoft/WSL/releases). +2. Download the `.msi` installer from the latest stable release (under the Assets drop-down). +3. Run the downloaded installer and follow the setup instructions. + +### Installer flags > [!NOTE] > @@ -218,72 +278,101 @@ By default, Docker Desktop is installed at `C:\Program Files\Docker\Docker`. > Start-Process 'Docker Desktop Installer.exe' -Wait -ArgumentList 'install', '--accept-license' > ``` -If your admin account is different to your user account, you must add the user to the **docker-users** group: +#### Installation behavior -```console -$ net localgroup docker-users /add -``` - -The `install` command accepts the following flags: - -##### Installation behavior +- `--user`: Installs Docker Desktop in per-user mode, to `%LOCALAPPDATA%\Programs\DockerDesktop`. No administrator privileges are required. This is the recommended mode for most users. See [Installation modes](#installation-modes). - `--quiet`: Suppresses information output when running the installer - `--accept-license`: Accepts the [Docker Subscription Service Agreement](https://www.docker.com/legal/docker-subscription-service-agreement) now, rather than requiring it to be accepted when the application is first run - `--installation-dir=`: Changes the default installation location (`C:\Program Files\Docker\Docker`) - `--backend=`: Selects the default backend to use for Docker Desktop, `hyper-v`, `windows` or `wsl-2` (default) - `--always-run-service`: After installation completes, starts `com.docker.service` and sets the service startup type to Automatic. This circumvents the need for administrator privileges, which are otherwise necessary to start `com.docker.service`. `com.docker.service` is required by Windows containers and Hyper-V backend. 
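As a sketch of how the installation behavior flags above combine, the following runs an unattended per-user install from PowerShell. The flag set is illustrative; adjust it to the installation mode and license handling you actually want.

```powershell
# Unattended per-user install: no admin prompt, license accepted up front
Start-Process 'Docker Desktop Installer.exe' -Wait -ArgumentList 'install', '--accept-license', '--quiet', '--user'
```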
-##### Security and access control +#### Security and access control - `--allowed-org=`: Requires the user to sign in and be part of the specified Docker Hub organization when running the application -- `--admin-settings`: Automatically creates an `admin-settings.json` file which is used by admins to control certain Docker Desktop settings on client machines within their organization. For more information, see [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md). +- `--admin-settings`: Automatically creates an `admin-settings.json` file which is used by admins to control certain Docker Desktop settings on client machines within their organization. For more information, see [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md). - It must be used together with the `--allowed-org=` flag. - For example:`--allowed-org= --admin-settings="{'configurationFileVersion': 2, 'enhancedContainerIsolation': {'value': true, 'locked': false}}"` - `--no-windows-containers`: Disables the Windows containers integration. This can improve security. For more information, see [Windows containers](/manuals/desktop/setup/install/windows-permission-requirements.md#windows-containers). -##### Proxy configuration +#### Proxy configuration - `--proxy-http-mode=`: Sets the HTTP Proxy mode, `system` (default) or `manual` - `--override-proxy-http=`: Sets the URL of the HTTP proxy that must be used for outgoing HTTP requests, requires `--proxy-http-mode` to be `manual` - `--override-proxy-https=`: Sets the URL of the HTTP proxy that must be used for outgoing HTTPS requests, requires `--proxy-http-mode` to be `manual` - `--override-proxy-exclude=`: Bypasses proxy settings for the hosts and domains. Uses a comma-separated list. - `--proxy-enable-kerberosntlm`: Enables Kerberos and NTLM proxy authentication. If you are enabling this, ensure your proxy server is properly configured for Kerberos/NTLM authentication. Available with Docker Desktop 4.32 and later. +- `--override-proxy-pac=`: Sets the PAC file URL. This setting takes effect only when using `manual` proxy mode. +- `--override-proxy-embedded-pac=`: Specifies an embedded PAC (Proxy Auto-Config) script. This setting takes effect only when using `manual` proxy mode and has precedence over the `--override-proxy-pac` flag. + +##### Example of specifying PAC file + +```console +"Docker Desktop Installer.exe" install --proxy-http-mode="manual" --override-proxy-pac="http://localhost:8080/myproxy.pac" +``` + +##### Example of specifying PAC script -##### Data root and disk location +```console +"Docker Desktop Installer.exe" install --proxy-http-mode="manual" --override-proxy-embedded-pac="function FindProxyForURL(url, host) { return \"DIRECT\"; }" +``` + +#### Data root and disk location - `--hyper-v-default-data-root=`: Specifies the default location for the Hyper-V VM disk. - `--windows-containers-default-data-root=`: Specifies the default location for the Windows containers. - `--wsl-default-data-root=`: Specifies the default location for the WSL distribution disk. -## Start Docker Desktop +### Administrator privileges -Docker Desktop does not start automatically after installation. To start Docker Desktop: +In per-user mode, Docker Desktop can be installed and updated without administrator privileges. Some settings still require elevation and are marked **Requires password** in the Settings UI. 
Enabling WSL 2 for the first time also requires administrator privileges, but this is a one-time, per-machine operation. -1. Search for Docker, and select **Docker Desktop** in the search results. +In all-users mode, installing Docker Desktop requires administrator privileges. However, once installed, it can be used without administrative access. Some actions, though, still need elevated permissions. See [Understand permission requirements for Windows](./windows-permission-requirements.md) for more detail. -2. The Docker menu ({{< inline-image src="images/whale-x.svg" alt="whale menu" >}}) displays the Docker Subscription Service Agreement. +See the [FAQs](/manuals/desktop/troubleshoot-and-support/faqs/general.md#how-do-i-run-docker-desktop-without-administrator-privileges) on how to install and run Docker Desktop without needing administrator privileges. - {{% include "desktop-license-update.md" %}} +If you're an IT admin and your users do not have administrator rights and plan to perform operations that require elevated privileges, be sure to install Docker Desktop using the `--always-run-service` installer flag. This ensures those actions can still be executed without prompting for User Account Control (UAC) elevation. See [Installer Flags](#installer-flags) for more detail. -3. Select **Accept** to continue. Docker Desktop starts after you accept the terms. +### Windows containers - Note that Docker Desktop won't run if you do not agree to the terms. You can choose to accept the terms at a later date by opening Docker Desktop. +> [!NOTE] +> +> Windows containers are only supported in all-users installation mode. They are not available when Docker Desktop is installed per-user. - For more information, see [Docker Desktop Subscription Service Agreement](https://www.docker.com/legal/docker-subscription-service-agreement/). It is recommended that you read the [FAQs](https://www.docker.com/pricing/faq). +From the Docker Desktop menu, you can toggle which daemon (Linux or Windows) +the Docker CLI talks to. Select **Switch to Windows containers** to use Windows +containers, or select **Switch to Linux containers** to use Linux containers +(the default). -> [!TIP] +For more information on Windows containers, refer to the following documentation: + +- Microsoft documentation on [Windows containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/index). + +- [Build and Run Your First Windows Server Container (Blog Post)](https://www.docker.com/blog/build-your-first-docker-windows-server-container/) + gives a quick tour of how to build and run native Docker Windows containers on Windows 10 and Windows Server 2016 evaluation releases. + +- [Getting Started with Windows Containers (Lab)](https://github.com/docker/labs/blob/master/windows/windows-containers/README.md) + shows you how to use the [MusicStore](https://github.com/aspnet/MusicStore/) + application with Windows containers. The MusicStore is a standard .NET application and, + [forked here to use containers](https://github.com/friism/MusicStore), is a good example of a multi-container application. + +- To understand how to connect to Windows containers from the local host, see + [I want to connect to a container from Windows](/manuals/desktop/features/networking.md#i-want-to-connect-to-a-container-from-the-host) + +> [!NOTE] > -> As an IT administrator, you can use endpoint management (MDM) software to identify the number of Docker Desktop instances and their versions within your environment. 
This can provide accurate license reporting, help ensure your machines use the latest version of Docker Desktop, and enable you to [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md). -> - [Intune](https://learn.microsoft.com/en-us/mem/intune/apps/app-discovered-apps) -> - [Jamf](https://docs.jamf.com/10.25.0/jamf-pro/administrator-guide/Application_Usage.html) -> - [Kandji](https://support.kandji.io/support/solutions/articles/72000559793-view-a-device-application-list) -> - [Kolide](https://www.kolide.com/features/device-inventory/properties/mac-apps) -> - [Workspace One](https://blogs.vmware.com/euc/2022/11/how-to-use-workspace-one-intelligence-to-manage-app-licenses-and-reduce-costs.html) +> When you switch to Windows containers, **Settings** only shows those tabs that are active and apply to your Windows containers. + +If you set proxies or daemon configuration in Windows containers mode, these +apply only on Windows containers. If you switch back to Linux containers, +proxies and daemon configurations return to what you had set for Linux +containers. Your Windows container settings are retained and become available +again when you switch back. ## Where to go next -- Explore [Docker's subscriptions](https://www.docker.com/pricing/) to see what Docker can offer you. +- Explore [Docker's subscriptions](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopWindowsInstall) to see what Docker can offer you. - [Get started with Docker](/get-started/introduction/_index.md). - [Explore Docker Desktop](/manuals/desktop/use-desktop/_index.md) and all its features. - [Troubleshooting](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md) describes common problems, workarounds, and diff --git a/content/manuals/desktop/setup/install/windows-permission-requirements.md b/content/manuals/desktop/setup/install/windows-permission-requirements.md index 9ae094f312e..b29f4aa6ce1 100644 --- a/content/manuals/desktop/setup/install/windows-permission-requirements.md +++ b/content/manuals/desktop/setup/install/windows-permission-requirements.md @@ -12,18 +12,56 @@ weight: 40 This page contains information about the permission requirements for running and installing Docker Desktop on Windows, the functionality of the privileged helper process `com.docker.service`, and the reasoning behind this approach. -It also provides clarity on running containers as `root` as opposed to having `Administrator` access on the host and the privileges of the Windows Docker engine and Windows containers. +It also provides clarity on running containers as `root` as opposed to having `Administrator` access on the host and the privileges of Docker Engine and Windows containers. Docker Desktop on Windows is designed with security in mind. Administrative rights are only required when absolutely necessary. ## Permission requirements -While Docker Desktop on Windows can be run without having `Administrator` privileges, it does require them during installation. On installation you receive a UAC prompt which allows a privileged helper service to be installed. After that, Docker Desktop can be run without administrator privileges, provided you are members of the `docker-users` group. If you performed the installation, you are automatically added to this group, but other users must be added manually. This allows the administrator to control who has access to Docker Desktop. 
- -The reason for this approach is that Docker Desktop needs to perform a limited set of privileged operations which are conducted by the privileged helper process `com.docker.service`. This approach allows, following the principle of least privilege, `Administrator` access to be used only for the operations for which it is absolutely necessary, while still being able to use Docker Desktop as an unprivileged user. +The permissions required to install and run Docker Desktop depend on which [installation mode](/manuals/desktop/setup/install/windows-install.md#installation-modes) you use. + +### Per-user installation (Beta) + +In per-user mode, Docker Desktop installs to `%LOCALAPPDATA%\Programs\DockerDesktop` and writes only to current-user registry keys (`HKCU`). This means: + +- No administrator privileges are required to install or update Docker Desktop. +- After installation, Docker Desktop can be run without administrator privileges. +- Some settings marked **Requires password** in **Settings** still require elevation. When you change one of these settings and select **Apply**, Docker Desktop opens a UAC prompt for administrator access. + +Per-user installation does not install the privileged helper service `com.docker.service` automatically. As a result, features that depend on it, such as the Hyper-V backend and Windows containers, are not available. For most users this is not a limitation, as the WSL 2 backend covers the majority of use cases. + +### All-users installation + +In all-users mode, Docker Desktop installs to `C:\Program Files\Docker\Docker` and writes to Local Machine registry keys (`HKLM`). Both locations require administrator privileges to modify, so: + +- Administrator privileges are required to install and update Docker Desktop. +- On installation you receive a UAC prompt which allows the privileged helper service `com.docker.service` to be installed. +- After installation, Docker Desktop can be run without administrator privileges. + +Running Docker Desktop without the privileged helper does not require users to have `docker-users` group membership. However, some features that require privileged operations will have this requirement. + +If you performed the installation, you are automatically added to the `docker-users` group, but other users must be added manually. This allows the administrator to control who has access to features that require higher privileges, such as creating and managing the Hyper-V VM, or using Windows containers. + +When Docker Desktop launches, all non-privileged named pipes are created so that only the following users can access them: +- The user that launched Docker Desktop. +- Members of the local `Administrators` group. +- The `LOCALSYSTEM` account. + +### Operations that always require elevation + +The following require administrator privileges regardless of installation mode. + +- Enabling WSL 2 for the first time: WSL 2 must be enabled on the machine before Docker Desktop can run. This is a one-time, per-machine operation. Once WSL 2 is enabled, it does not need to be enabled again for subsequent Docker Desktop installs or updates. +- Settings marked **Requires password**: Certain Docker Desktop settings affect system-level configuration and require administrator credentials to apply. These are clearly marked **Requires password**. When you change one of these settings and select **Apply**, Docker Desktop prompts for administrator credentials. 
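Because the two modes differ in install location and registry hive, a rough way to see which one is present on a machine is to test for the documented paths. This is an informal sketch, not an official detection method.

```powershell
# Informal check based on the install locations documented above
if (Test-Path "$env:LOCALAPPDATA\Programs\DockerDesktop") { 'Per-user installation found' }
if (Test-Path "$env:ProgramFiles\Docker\Docker") { 'All-users installation found' }
```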
## Privileged helper +Docker Desktop needs to perform a limited set of privileged operations which are conducted by the privileged helper process `com.docker.service`. This approach allows, following the principle of least privilege, `Administrator` access to be used only for the operations for which it is absolutely necessary, while still being able to use Docker Desktop as an unprivileged user. + +> [!NOTE] +> +> `com.docker.service` is only installed in all-users installation mode. It is not used in per-user installation, which instead relies solely on the WSL 2 backend and does not support Hyper-V or Windows containers. + The privileged helper `com.docker.service` is a Windows service which runs in the background with `SYSTEM` privileges. It listens on the named pipe `//./pipe/dockerBackendV2`. The developer runs the Docker Desktop application, which connects to the named pipe and sends commands to the service. This named pipe is protected, and only users that are part of the `docker-users` group can have access to it. The service performs the following functionalities: @@ -57,7 +95,7 @@ into Docker containers still retain their original permissions. Containers don' ## Enhanced Container Isolation In addition, Docker Desktop supports [Enhanced Container Isolation -mode](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md) (ECI), +mode](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) (ECI), available to Business customers only, which further secures containers without impacting developer workflows. @@ -73,6 +111,10 @@ isolated from the Docker daemon and other services running inside the VM. > > Enabling Windows containers has important security implications. +> [!NOTE] +> +> Windows containers are only supported in all-users installation mode. They are not available when Docker Desktop is installed per-user. See [Installation modes](/manuals/desktop/setup/install/windows-install.md#installation-modes). + Unlike the Linux Docker Engine and containers which run in a VM, Windows containers are implemented using operating system features, and run directly on the Windows host. If you enable Windows containers during installation, the `ContainerAdministrator` user used for administration inside the container is a local administrator on the host machine. Enabling Windows containers during installation makes it so that members of the `docker-users` group are able to elevate to administrators on the host. For organizations who don't want their developers to run Windows containers, a `-–no-windows-containers` installer flag is available to disable their use. ## Networking diff --git a/content/manuals/desktop/setup/sign-in.md b/content/manuals/desktop/setup/sign-in.md index d05abf62592..f354c694ba9 100644 --- a/content/manuals/desktop/setup/sign-in.md +++ b/content/manuals/desktop/setup/sign-in.md @@ -32,11 +32,11 @@ aliases: Docker recommends signing in with the **Sign in** option in the top-right corner of the Docker Dashboard. -In large enterprises where admin access is restricted, administrators can [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md). +In large enterprises where admin access is restricted, administrators can [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md). > [!TIP] > -> Explore [Docker's core subscriptions](https://www.docker.com/pricing/) to see what else Docker can offer you. 
+> Explore [Docker's core subscriptions](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopSignIn) to see what else Docker can offer you. ## Benefits of signing in @@ -44,7 +44,7 @@ In large enterprises where admin access is restricted, administrators can [enfor - Increase your pull rate limit compared to anonymous users. See [Usage and limits](/manuals/docker-hub/usage/_index.md). -- Enhance your organization’s security posture for containerized development with [Hardened Desktop](/manuals/security/for-admins/hardened-desktop/_index.md). +- Enhance your organization’s security posture for containerized development with [Hardened Desktop](/manuals/enterprise/security/hardened-desktop/_index.md). > [!NOTE] > @@ -71,10 +71,10 @@ Docker Desktop displays a warning if `pass` is not configured. 3ABCD1234EF56G78 uid Molly ``` -3. Copy the GPG ID and use it to initialize `pass` +3. Copy the GPG ID and use it to initialize `pass`. For example ```console - $ pass init + $ pass init 3ABCD1234EF56G78 ``` You should see output similar to: diff --git a/content/manuals/desktop/setup/vm-vdi.md b/content/manuals/desktop/setup/vm-vdi.md index b5197824776..80bddb97f74 100644 --- a/content/manuals/desktop/setup/vm-vdi.md +++ b/content/manuals/desktop/setup/vm-vdi.md @@ -4,26 +4,52 @@ keywords: nested virtualization, Docker Desktop, windows, VM, VDI environment title: Run Docker Desktop for Windows in a VM or VDI environment linkTitle: VM or VDI environments aliases: - - /desktop/nested-virtualization/ - - /desktop/vm-vdi/ + - /desktop/nested-virtualization/ + - /desktop/vm-vdi/ weight: 30 --- -Docker recommends running Docker Desktop natively on Mac, Linux, or Windows. However, Docker Desktop for Windows can run inside a virtual desktop provided the virtual desktop is properly configured. +Docker recommends running Docker Desktop natively on Mac, Linux, or Windows. However, Docker Desktop for Windows can run inside a virtual desktop provided the virtual desktop is properly configured. -To run Docker Desktop in a virtual desktop environment, it is essential nested virtualization is enabled on the virtual machine that provides the virtual desktop. This is because, under the hood, Docker Desktop is using a Linux VM in which it runs Docker Engine and the containers. +To run Docker Desktop in a virtual desktop environment, you have two options, +depending on whether nested virtualization is supported: -## Virtual desktop support +- If your environment supports nested virtualization, you can run Docker Desktop + with its default local Linux VM. +- If nested virtualization is not supported, Docker recommends subscribing to and using Docker Offload. + +## Use Docker Offload + +[Docker Offload](/offload/) lets you offload container workloads to a high-performance, fully hosted cloud environment, +enabling a seamless hybrid experience. + +Docker Offload is useful in virtual desktop environments where nested virtualization isn't supported. In these +environments, Docker Desktop can use Docker Offload to ensure you can still build and run containers without relying on +local virtualization. + +Docker Offload decouples the Docker Desktop client from the Docker Engine, +allowing the Docker CLI and Docker Desktop Dashboard to interact with +cloud-based resources as if they were local. When you run a container, Docker +provisions a secure, isolated, and ephemeral cloud environment connected to +Docker Desktop via an SSH tunnel. 
Despite running remotely, features like bind +mounts and port forwarding continue to work seamlessly, providing a local-like +experience. + +For more information, see the [Docker Offload product +page](https://www.docker.com/products/docker-offload/) and the [Docker Offload +documentation](/offload/). + +## Virtual desktop support when using nested virtualization > [!NOTE] > > Support for running Docker Desktop on a virtual desktop is available to Docker Business customers, on VMware ESXi or Azure VMs only. -Docker support includes installing and running Docker Desktop within the VM, provided that nested virtualization is correctly enabled. The only hypervisors successfully tested are VMware ESXi and Azure, and there is no support for other VMs. For more information on Docker Desktop support, see [Get support](/manuals/desktop/troubleshoot-and-support/support.md). +Docker support includes installing and running Docker Desktop within the VM, provided that nested virtualization is correctly enabled. The only hypervisors successfully tested are VMware ESXi and Azure, and there is no support for other VMs. For more information on Docker Desktop support, see [Get support](/manuals/support/_index.md). For troubleshooting problems and intermittent failures that are outside of Docker's control, you should contact your hypervisor vendor. Each hypervisor vendor offers different levels of support. For example, Microsoft supports running nested Hyper-V both on-prem and on Azure, with some version constraints. This may not be the case for VMware ESXi. -Docker does not support running multiple instances of Docker Desktop on the same machine in a VM or VDI environment. +Docker does not support running multiple instances of Docker Desktop on the same machine in a VM or VDI environment. > [!TIP] > @@ -33,13 +59,14 @@ Docker does not support running multiple instances of Docker Desktop on the same ## Turn on nested virtualization -You must turn on nested virtualization before you install Docker Desktop on a virtual machine. +You must turn on nested virtualization before you install Docker Desktop on a +virtual machine that will not use Docker Offload. ### Turn on nested virtualization on VMware ESXi Nested virtualization of other hypervisors like Hyper-V inside a vSphere VM [is not a supported scenario](https://kb.vmware.com/s/article/2009916). However, running a Hyper-V VM in a VMware ESXi VM is technically possible and, depending on the version, ESXi includes hardware-assisted virtualization as a supported feature. A VM that had 1 CPU with 4 cores and 12GB of memory was used for internal testing. -For steps on how to expose hardware-assisted virtualization to the guest OS, [see VMware's documentation](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-2A98801C-68E8-47AF-99ED-00C63E4857F6.html). +For steps on how to expose hardware-assisted virtualization to the guest OS, [see VMware's documentation](https://techdocs.broadcom.com/us/en/vmware-cis/vsphere/vsphere/7-0/expose-hardware-assisted-virtualization.html).
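Regardless of the hypervisor, it can help to confirm from inside the Windows guest that virtualization extensions are actually exposed before installing Docker Desktop. The following is a minimal sketch run from PowerShell inside the guest, not an official procedure; the exact fields reported vary by Windows version and by whether Hyper-V is already enabled:

```console
# Run inside the Windows guest VM, in an elevated PowerShell session.
# Look for "Virtualization Enabled In Firmware: Yes" or
# "A hypervisor has been detected" under Hyper-V Requirements.
$ systeminfo | Select-String "Hyper-V", "Virtualization"

# Alternatively, query the virtual CPU directly. True indicates the
# hypervisor exposes hardware-assisted virtualization to this guest.
$ (Get-CimInstance Win32_Processor).VirtualizationFirmwareEnabled
```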
### Turn on nested virtualization on an Azure Virtual Machine @@ -55,16 +82,16 @@ If using Windows container mode, confirm that the Nutanix environment supports H ### Supported configurations -Docker Desktop follows the VDI support definitions outlined [previously](#virtual-desktop-support): +Docker Desktop follows the VDI support definitions outlined [previously](#virtual-desktop-support-when-using-nested-virtualization): - - Persistent VDI environments (Supported): You receive the same virtual desktop instance across sessions, preserving installed software and configurations. +- Persistent VDI environments (Supported): You receive the same virtual desktop instance across sessions, preserving installed software and configurations. - - Non-persistent VDI environments (Not supported): Docker Desktop does not support environments where the OS resets between sessions, requiring re-installation or reconfiguration each time. +- Non-persistent VDI environments (Not supported): Docker Desktop does not support environments where the OS resets between sessions, requiring re-installation or reconfiguration each time. ### Support scope and responsibilities For WSL 2-related issues, contact Nutanix support. For Docker Desktop-specific issues, contact Docker support. -## Aditional resources +## Additional resources -- [Docker Desktop on Microsoft Dev Box](/manuals/desktop/features/dev-box.md) \ No newline at end of file +- [Docker Desktop on Microsoft Dev Box](/manuals/enterprise/enterprise-deployment/dev-box.md) diff --git a/content/manuals/desktop/troubleshoot-and-support/faqs/general.md b/content/manuals/desktop/troubleshoot-and-support/faqs/general.md index d21afd8e306..4005e3fc972 100644 --- a/content/manuals/desktop/troubleshoot-and-support/faqs/general.md +++ b/content/manuals/desktop/troubleshoot-and-support/faqs/general.md @@ -18,27 +18,12 @@ weight: 10 Yes, you can use Docker Desktop offline. However, you cannot access features that require an active internet connection. Additionally, any functionality that requires you to sign in won't work while using Docker Desktop offline or in air-gapped environments. -This includes: - -- The resources in the [Learning Center](/manuals/desktop/use-desktop/_index.md) -- Pulling or pushing an image to Docker Hub -- [Image Access Management](/manuals/security/for-developers/access-tokens.md) -- [Static vulnerability scanning](/manuals/docker-hub/repos/manage/vulnerability-scanning.md) -- Viewing remote images in the Docker Dashboard -- Setting up [Dev Environments](/manuals/desktop/features/dev-environments/_index.md) -- Docker Build when using [BuildKit](/manuals/build/buildkit/_index.md#getting-started). - You can work around this by disabling BuildKit. Run `DOCKER_BUILDKIT=0 docker build .` to disable BuildKit. -- [Kubernetes](/manuals/desktop/features/kubernetes.md) (Images are download when you enable Kubernetes for the first time) -- Checking for updates -- [In-app diagnostics](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md#diagnose-from-the-app) (including the [Self-diagnose tool](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md#diagnose-from-the-app)) -- Sending usage statistics -- When `networkMode` is set to `mirrored` ### How do I connect to the remote Docker Engine API? To connect to the remote Engine API, you might need to provide the location of the Engine API for Docker clients and development tools. 
-Mac and Windows WSL 2 users can connect to the Docker Engine through a Unix socket: `unix:///var/run/docker.sock`. +Mac and Windows WSL 2 users can connect to the Docker Engine through a Unix socket: `unix:///var/run/docker.sock`. Docker Desktop for Linux uses a [per-user socket](linuxfaqs.md#how-do-i-use-docker-sdks-with-docker-desktop-for-linux) located at `~/.docker/desktop/docker.sock` instead of the system-wide `/var/run/docker.sock`. If you are working with applications like [Apache Maven](https://maven.apache.org/) that expect settings for `DOCKER_HOST` and `DOCKER_CERT_PATH` environment @@ -61,12 +46,53 @@ The host has a changing IP address, or none if you have no network access. It is recommended that you connect to the special DNS name `host.docker.internal`, which resolves to the internal IP address used by the host. -For more information and examples, see [how to connect from a container to a service on the host](/manuals/desktop/features/networking.md#i-want-to-connect-from-a-container-to-a-service-on-the-host). +For more information and examples, see [how to connect from a container to a service on the host](/manuals/desktop/features/networking.md#connect-a-container-to-a-service-on-the-host). ### Can I pass through a USB device to a container? Docker Desktop does not support direct USB device passthrough. However, you can use USB over IP to connect common USB devices to the Docker Desktop VM and in turn be forwarded to a container. For more details, see [Using USB/IP with Docker Desktop](/manuals/desktop/features/usbip.md). +### How do I verify Docker Desktop is using a proxy server? + +To verify, look at the most recent events logged in `httpproxy.log`. This is located at `~/Library/Containers/com.docker.docker/Data/log/host` on macOS or `%LOCALAPPDATA%/Docker/log/host/` on Windows. + +The following shows a few examples of what you can expect to see: + +- Docker Desktop using app level settings (proxy mode manual) for proxy: + + ```console + host will use proxy: app settings http_proxy=http://172.211.16.3:3128 https_proxy=http://172.211.16.3:3128 + Linux will use proxy: app settings http_proxy=http://172.211.16.3:3128 https_proxy=http://172.211.16.3:3128 + ``` + +- Docker Desktop using system level settings (proxy mode system) for proxy: + + ```console + host will use proxy: static system http_proxy=http://172.211.16.3:3128 https_proxy=http://172.211.16.3:3128 no_proxy= + Linux will use proxy: static system http_proxy=http://172.211.16.3:3128 https_proxy=http://172.211.16.3:3128 no_proxy= + ``` + +- Docker Desktop is not configured to use a proxy server: + + ```console + host will use proxy: disabled + Linux will use proxy: disabled + ``` + +- Docker Desktop is configured to use app level settings (proxy mode manual) and using a PAC file: + + ```console + using a proxy PAC file: http://127.0.0.1:8081/proxy.pac + host will use proxy: app settings from PAC file http://127.0.0.1:8081/proxy.pac + Linux will use proxy: app settings from PAC file http://127.0.0.1:8081/proxy.pac + ``` + +- Connect request using the configured proxy server: + + ```console + CONNECT desktop.docker.com:443: host connecting via static system HTTPS proxy http://172.211.16.3:3128 + ``` + ### How do I run Docker Desktop without administrator privileges? Docker Desktop requires administrator privileges only for installation. Once installed, administrator privileges are not needed to run it.
However, for non-admin users to run Docker Desktop, it must be installed using a specific installer flag and meet certain prerequisites, which vary by platform. @@ -84,7 +110,7 @@ You can then sign in to your machine with the user ID specified, and launch Dock > [!NOTE] > -> Before launching Docker Desktop, if a `settings-store.json` file (or `settings.json` for Docker Desktop versions 4.34 and earlier) already exists in the `~/Library/Group Containers/group.com.docker/` directory, you will see a **Finish setting up Docker Desktop** window that prompts for administrator privileges when you select **Finish**. To avoid this, ensure you delete the `settings-store.json` file (or `settings.json` for Docker Desktop versions 4.34 and earlier) left behind from any previous installations before launching the application. +> Before launching Docker Desktop, if a `settings-store.json` file already exists in the `~/Library/Group Containers/group.com.docker/` directory, you will see a **Finish setting up Docker Desktop** window that prompts for administrator privileges when you select **Finish**. To avoid this, ensure you delete the `settings-store.json` file left behind from any previous installations before launching the application. {{< /tab >}} {{< tab name="Windows" >}} diff --git a/content/manuals/desktop/troubleshoot-and-support/faqs/linuxfaqs.md b/content/manuals/desktop/troubleshoot-and-support/faqs/linuxfaqs.md index 5257cfb3206..177006de4db 100644 --- a/content/manuals/desktop/troubleshoot-and-support/faqs/linuxfaqs.md +++ b/content/manuals/desktop/troubleshoot-and-support/faqs/linuxfaqs.md @@ -5,8 +5,8 @@ title: FAQs for Docker Desktop for Linux linkTitle: Linux tags: [FAQ] aliases: -- /desktop/linux/space/ -- /desktop/faqs/linuxfaqs/ + - /desktop/linux/space/ + - /desktop/faqs/linuxfaqs/ weight: 40 --- @@ -16,32 +16,32 @@ Docker Desktop for Linux runs a Virtual Machine (VM) for the following reasons: 1. To ensure that Docker Desktop provides a consistent experience across platforms. - During research, the most frequently cited reason for users wanting Docker Desktop for Linux was to ensure a consistent Docker Desktop - experience with feature parity across all major operating systems. Utilizing - a VM ensures that the Docker Desktop experience for Linux users will closely - match that of Windows and macOS. + During research, the most frequently cited reason for users wanting Docker Desktop for Linux was to ensure a consistent Docker Desktop + experience with feature parity across all major operating systems. Utilizing + a VM ensures that the Docker Desktop experience for Linux users will closely + match that of Windows and macOS. 2. To make use of new kernel features. - Sometimes we want to make use of new operating system features. Because we control the kernel and the OS inside the VM, we can roll these out to all users immediately, even to users who are intentionally sticking on an LTS version of their machine OS. + Because Docker controls the kernel and the OS inside the VM, Docker can roll these out to all users immediately, even to users who are intentionally sticking on an LTS version of their machine OS. 3. To enhance security. - Container image vulnerabilities pose a security risk for the host environment. There is a large number of unofficial images that are not guaranteed to be verified for known vulnerabilities. Malicious users can push images to public registries and use different methods to trick users into pulling and running them. 
The VM approach mitigates this threat as any malware that gains root privileges is restricted to the VM environment without access to the host. + Container image vulnerabilities pose a security risk for the host environment. There is a large number of unofficial images that are not guaranteed to be verified for known vulnerabilities. Malicious users can push images to public registries and use different methods to trick users into pulling and running them. The VM approach mitigates this threat as any malware that gains root privileges is restricted to the VM environment without access to the host. - Why not run rootless Docker? Although this has the benefit of superficially limiting access to the root user so everything looks safer in "top", it allows unprivileged users to gain `CAP_SYS_ADMIN` in their own user namespace and access kernel APIs which are not expecting to be used by unprivileged users, resulting in [vulnerabilities](https://www.openwall.com/lists/oss-security/2022/01/18/7). + Why not run rootless Docker? Although this has the benefit of superficially limiting access to the root user so everything looks safer in "top", it allows unprivileged users to gain `CAP_SYS_ADMIN` in their own user namespace and access kernel APIs which are not expecting to be used by unprivileged users, resulting in [vulnerabilities](https://www.openwall.com/lists/oss-security/2022/01/18/7). 4. To provide the benefits of feature parity and enhanced security, with minimal impact on performance. - The VM utilized by Docker Desktop for Linux uses [`VirtioFS`](https://virtio-fs.gitlab.io), a shared file system that allows virtual machines to access a directory tree located on the host. Our internal benchmarking shows that with the right resource allocation to the VM, near native file system performance can be achieved with VirtioFS. + The VM utilized by Docker Desktop for Linux uses [`VirtioFS`](https://virtio-fs.gitlab.io), a shared file system that allows virtual machines to access a directory tree located on the host. Docker's internal benchmarking shows that with the right resource allocation to the VM, near native file system performance can be achieved with VirtioFS. - As such, we have adjusted the default memory available to the VM in Docker Desktop for Linux. You can tweak this setting to your specific needs by using the **Memory** slider within the **Settings** > **Resources** tab of Docker Desktop. + As such, the default memory available to the VM in Docker Desktop for Linux is adjusted. You can tweak this setting to your specific needs by using the **Memory** slider within the **Settings** > **Resources** tab of Docker Desktop. ### How do I enable file sharing? Docker Desktop for Linux uses [VirtioFS](https://virtio-fs.gitlab.io/) as the default (and currently only) mechanism to enable file sharing between the host -and Docker Desktop VM. +and Docker Desktop VM. {{< accordion title="Additional information for Docker Desktop version 4.34 and earlier" >}} @@ -59,7 +59,7 @@ mappings for IDs greater than 0 in the containers. | ID in container | ID on host | | --------------- | -------------------------------------------------------------------------------- | -| 0 (root) | ID of the user running Docker Desktop (e.g. 1000) | +| 0 (root) | ID of the user running Docker Desktop (e.g. 1000) | | 1 | 0 + beginning of ID range specified in `/etc/subuid`/`/etc/subgid` (e.g. 100000) | | 2 | 1 + beginning of ID range specified in `/etc/subuid`/`/etc/subgid` (e.g. 
100001) | | 3 | 2 + beginning of ID range specified in `/etc/subuid`/`/etc/subgid` (e.g. 100002) | @@ -95,6 +95,34 @@ ACL (see `setfacl(1)`) for folders shared with the Docker Desktop VM. {{< /accordion >}} +### How do I use Docker SDKs with Docker Desktop for Linux? + +Docker Desktop for Linux uses a per-user socket located at `~/.docker/desktop/docker.sock` instead of the system-wide `/var/run/docker.sock`. The Docker CLI handles this automatically through the `desktop-linux` context, but Docker SDKs and other tools that connect directly to the Docker daemon also need the `DOCKER_HOST` environment variable set. + +Without setting `DOCKER_HOST`, SDKs attempt to connect to `/var/run/docker.sock` and fail with an error like: + +```text +Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running? +``` + +To fix this, set the `DOCKER_HOST` environment variable before running your SDK-based application: + +```console +export DOCKER_HOST=unix://$HOME/.docker/desktop/docker.sock +``` + +Or dynamically retrieve it from the `desktop-linux` context: + +```console +export DOCKER_HOST=$(docker context inspect desktop-linux --format '{{ .Endpoints.docker.Host }}') +``` + +To make this permanent, add the export command to your shell profile (`~/.bashrc`, `~/.zshrc`, or similar): + +```console +echo 'export DOCKER_HOST=unix://$HOME/.docker/desktop/docker.sock' >> ~/.bashrc +``` + ### Where does Docker Desktop store Linux containers? Docker Desktop stores Linux containers and images in a single, large "disk image" file in the Linux filesystem. This is different from Docker on Linux, which usually stores containers and images in the `/var/lib/docker` directory on the host's filesystem. @@ -117,11 +145,11 @@ If the disk image file is too large, you can: To move the disk image file to a different location: -1. Select **Settings** then **Advanced** from the **Resources** tab. +1. Select **Settings** then **Advanced** from the **Resources** tab. 2. In the **Disk image location** section, select **Browse** and choose a new location for the disk image. -3. Select **Apply & Restart** for the changes to take effect. +3. Select **Apply** for the changes to take effect. Do not move the file directly in Finder as this can cause Docker Desktop to lose track of the file. @@ -183,6 +211,6 @@ To reduce the maximum size of the disk image file: 2. The **Disk image size** section contains a slider that allows you to change the maximum size of the disk image. Adjust the slider to set a lower limit. -3. Select **Apply & Restart**. +3. Select **Apply**. When you reduce the maximum size, the current disk image file is deleted, and therefore, all containers and images are lost. diff --git a/content/manuals/desktop/troubleshoot-and-support/faqs/macfaqs.md b/content/manuals/desktop/troubleshoot-and-support/faqs/macfaqs.md index d11a00f7db2..f9fea6020fa 100644 --- a/content/manuals/desktop/troubleshoot-and-support/faqs/macfaqs.md +++ b/content/manuals/desktop/troubleshoot-and-support/faqs/macfaqs.md @@ -48,7 +48,7 @@ To move the disk image file to a different location: 2. In the **Disk image location** section, select **Browse** and choose a new location for the disk image. -3. Select **Apply & Restart** for the changes to take effect. +3. Select **Apply** for the changes to take effect. > [!IMPORTANT] > @@ -112,7 +112,7 @@ To reduce the maximum size of the disk image file: 2. The **Disk image size** section contains a slider that allows you to change the maximum size of the disk image. 
Adjust the slider to set a lower limit. -3. Select **Apply & Restart**. +3. Select **Apply**. When you reduce the maximum size, the current disk image file is deleted, and therefore, all containers and images are lost. diff --git a/content/manuals/desktop/troubleshoot-and-support/faqs/releases.md b/content/manuals/desktop/troubleshoot-and-support/faqs/releases.md index ed4cf4ed76e..da2270f5bae 100644 --- a/content/manuals/desktop/troubleshoot-and-support/faqs/releases.md +++ b/content/manuals/desktop/troubleshoot-and-support/faqs/releases.md @@ -8,7 +8,7 @@ tags: [FAQ] ### How frequent will new releases be? -New releases are available roughly every month, unless there are critical fixes that need to be released sooner. +New releases are available every week, unless there are critical fixes that need to be released sooner. The **Automatically check for updates** setting in the **Software updates** tab is turned on by default. This means you receive notifications in the Docker menu and a notification badge on the Docker Desktop Dashboard when a new version is available. diff --git a/content/manuals/desktop/troubleshoot-and-support/faqs/windowsfaqs.md b/content/manuals/desktop/troubleshoot-and-support/faqs/windowsfaqs.md index ea1da99fc25..8349042948b 100644 --- a/content/manuals/desktop/troubleshoot-and-support/faqs/windowsfaqs.md +++ b/content/manuals/desktop/troubleshoot-and-support/faqs/windowsfaqs.md @@ -48,8 +48,8 @@ in the Docker Engine topics. ### How do I add client certificates? You can add your client certificates -in `~/.docker/certs.d//client.cert` and -`~/.docker/certs.d//client.key`. You do not need to push your certificates with `git` commands. +in `~/.docker/certs.d/:/client.cert` and +`~/.docker/certs.d/:/client.key`. You do not need to push your certificates with `git` commands. When the Docker Desktop application starts, it copies the `~/.docker/certs.d` folder on your Windows system to the `/etc/docker/certs.d` diff --git a/content/manuals/desktop/troubleshoot-and-support/feedback.md b/content/manuals/desktop/troubleshoot-and-support/feedback.md index edba3f2d070..bb6d9bdcf30 100644 --- a/content/manuals/desktop/troubleshoot-and-support/feedback.md +++ b/content/manuals/desktop/troubleshoot-and-support/feedback.md @@ -12,7 +12,7 @@ There are many ways you can provide feedback on Docker Desktop or Docker Desktop ### In-product feedback -On each Docker Desktop Dashboard view, there is a **Give feedback** link. This opens a feedback form where you can share ideas directly with the Docker team. +On each Docker Desktop Dashboard view, there is a **Give feedback** link. This opens a feedback form where you can share ideas directly with the Docker Team. 
### Feedback via Docker Community forums @@ -26,12 +26,7 @@ discussion, sign in to the appropriate Docker forums: ### Report bugs or problems on GitHub To report bugs or problems, visit: -- [Docker Desktop for Mac issues on -GitHub](https://github.com/docker/for-mac/issues) -- [Docker Desktop for Windows issues on GitHub](https://github.com/docker/for-win/issues) -- [Docker Desktop for Linux issues on -GitHub](https://github.com/docker/desktop-linux/issues) -- [Dev Environments issues on GitHub](https://github.com/docker/dev-environments/issues) +- [Docker Desktop issues on GitHub](https://github.com/docker/desktop-feedback) - [Docker Extensions issues on GitHub](https://github.com/docker/extensions-sdk/issues) ### Feedback via Community Slack channels @@ -41,5 +36,4 @@ You can also provide feedback through the following [Docker Community Slack](htt - #docker-desktop-mac - #docker-desktop-windows - #docker-desktop-linux -- #docker-dev-environments - #extensions diff --git a/content/manuals/desktop/troubleshoot-and-support/support.md b/content/manuals/desktop/troubleshoot-and-support/support.md deleted file mode 100644 index 02b7a8033a7..00000000000 --- a/content/manuals/desktop/troubleshoot-and-support/support.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -description: See what support is available for Docker Desktop -keywords: Support, Docker Desktop, Linux, Mac, Windows -title: Get support for Docker Desktop -weight: 20 -aliases: - - /desktop/support/ - - /support/ ---- - -> [!NOTE] -> -> Docker Desktop offers support for developers with a [Pro, Team, or Business subscription](https://www.docker.com/pricing?utm_source=docker&utm_medium=webreferral&utm_campaign=docs_driven_upgrade_desktop_support). - -### How do I get Docker Desktop support? - -> [!TIP] -> -> Before reaching out for support, follow the appropriate [Diagnose steps](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md#diagnose) in the troubleshooting documentation. - -If you have a paid Docker subscription, you can [contact the Support team](https://hub.docker.com/support/contact/). - -All Docker users can seek support through the following resources, where Docker or the community respond on a best effort basis. -- [Docker Desktop for Windows GitHub repo](https://github.com/docker/for-win) -- [Docker Desktop for Mac GitHub repo](https://github.com/docker/for-mac) -- [Docker Desktop for Linux GitHub repo](https://github.com/docker/desktop-linux) -- [Docker Community Forums](https://forums.docker.com/) -- [Docker Community Slack](http://dockr.ly/comm-slack) - - -### What support can I get? - -- Account management related issues -- Automated builds -- Basic product 'how to' questions -- Billing or subscription issues -- Configuration issues -- Desktop installation issues - - Installation crashes - - Failure to launch Docker Desktop on first run -- Desktop update issues -- Sign-in issues in both the command line interface and Docker Hub user interface -- Push or pull issues, including rate limiting -- Usage issues - - Crash closing software - - Docker Desktop not behaving as expected - -For Windows users, you can also request support on: -- Turning on virtualization in BIOS -- Turning on Windows features -- Running inside [certain VM or VDI environments](/manuals/desktop/setup/vm-vdi.md) (Docker Business customers only) - -### What is not supported? 
- -Docker Desktop excludes support for the following types of issues: - -- Use on or in conjunction with hardware or software other than that specified in the applicable documentation -- Running on unsupported operating systems, including beta/preview versions of operating systems -- Running containers of a different architecture using emulation -- Support for Docker Engine, Docker CLI, or other bundled Linux components -- Support for Kubernetes -- Features labeled as experimental -- System/Server administration activities -- Supporting Desktop as a production runtime -- Scale deployment/multi-machine installation of Desktop -- Routine product maintenance (data backup, cleaning disk space and configuring log rotation) -- Third-party applications not provided by Docker -- Altered or modified Docker software -- Defects in the Docker software due to hardware malfunction, abuse, or improper use -- Any version of the Docker software other than the latest version -- Reimbursing and expenses spent for third-party services not provided by Docker -- Docker support excludes training, customization, and integration -- Running multiple instances of Docker Desktop on a single machine - -> [!NOTE] -> -> Support for [running Docker Desktop in a VM or VDI environment](/manuals/desktop/setup/vm-vdi.md) is only available to Docker Business customers. - -### What versions are supported? - -For Docker Business customers, Docker offers support for versions up to six months older than the latest version, although any fixes will be on the latest version. - -For Pro and Team customers, Docker only offers support for the latest version of Docker Desktop. If you are running an older version, Docker may ask you to update before investigating your support request. - -### How many machines can I get support for Docker Desktop on? - -As a Pro user you can get support for Docker Desktop on a single machine. -As a Team, you can get support for Docker Desktop for the number of machines equal to the number of seats as part of your plan. - -### What OS’s are supported? - -Docker Desktop is available for Mac, Linux, and Windows. The supported version information can be found on the following pages: - -- [Mac system requirements](/manuals/desktop/setup/install/mac-install.md#system-requirements) -- [Windows system requirements](/manuals/desktop/setup/install/windows-install.md#system-requirements) -- [Linux system requirements](/manuals/desktop/setup/install/linux/_index.md#system-requirements) - -### How is personal diagnostic data handled in Docker Desktop when I'm getting support? - -When uploading diagnostics to help Docker with investigating issues, the uploaded diagnostics bundle may contain personal data such as usernames and IP addresses. The diagnostics bundles are only accessible to Docker, Inc. -employees who are directly involved in diagnosing Docker Desktop issues. - -By default, Docker, Inc. will delete uploaded diagnostics bundles after 30 days. You may also request the removal of a diagnostics bundle by either specifying the diagnostics ID or via your GitHub ID (if the diagnostics ID is mentioned in a GitHub issue). Docker, Inc. will only use the data in the diagnostics bundle to investigate specific user issues but may derive high-level (non personal) metrics such as the rate of issues from it. - -For more information, see [Docker Data Processing Agreement](https://www.docker.com/legal/data-processing-agreement). 
diff --git a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md index 38d82183113..15fc495e4d2 100644 --- a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md +++ b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md @@ -1,31 +1,28 @@ --- -description: Understand how to diagnose and troubleshoot Docker Desktop, and how to +description: + Understand how to diagnose and troubleshoot Docker Desktop, and how to check the logs. keywords: Linux, Mac, Windows, troubleshooting, logs, issues, Docker Desktop toc_max: 2 title: Troubleshoot Docker Desktop linkTitle: Troubleshoot and diagnose aliases: - - /desktop/linux/troubleshoot/ - - /desktop/mac/troubleshoot/ - - /desktop/windows/troubleshoot/ - - /docker-for-mac/troubleshoot/ - - /mackit/troubleshoot/ - - /windows/troubleshoot/ - - /docker-for-win/troubleshoot/ - - /docker-for-windows/troubleshoot/ - - /desktop/troubleshoot/overview/ - - /desktop/troubleshoot/ -tags: [ Troubleshooting ] + - /desktop/linux/troubleshoot/ + - /desktop/mac/troubleshoot/ + - /desktop/windows/troubleshoot/ + - /docker-for-mac/troubleshoot/ + - /mackit/troubleshoot/ + - /windows/troubleshoot/ + - /docker-for-win/troubleshoot/ + - /docker-for-windows/troubleshoot/ + - /desktop/troubleshoot/overview/ + - /desktop/troubleshoot/ +tags: [Troubleshooting] weight: 10 --- This page contains information on how to diagnose and troubleshoot Docker Desktop, and how to check the logs. -> [!WARNING] -> -> If you're experiencing malware detection issues on Mac, follow the steps documented in [docker/for-mac#7527](https://github.com/docker/for-mac/issues/7527). - ## Troubleshoot menu To navigate to **Troubleshoot** either: @@ -40,67 +37,81 @@ The **Troubleshooting** menu contains the following options: - **Reset Kubernetes cluster**. Select to delete all stacks and Kubernetes resources. For more information, see [Kubernetes](/manuals/desktop/settings-and-maintenance/settings.md#kubernetes). - **Clean / Purge data**. This option resets all Docker data without a -reset to factory defaults. Selecting this option results in the loss of existing settings. + reset to factory defaults. Selecting this option results in the loss of existing settings. - **Reset to factory defaults**: Choose this option to reset all options on -Docker Desktop to their initial state, the same as when Docker Desktop was first installed. + Docker Desktop to their initial state, the same as when Docker Desktop was first installed. If you are a Mac or Linux user, you also have the option to **Uninstall** Docker Desktop from your system. ## Diagnose - + > [!TIP] > -> If you do not find a solution in troubleshooting, browse the GitHub repositories or create a new issue: -> -> - [docker/for-mac](https://github.com/docker/for-mac/issues) -> - [docker/for-win](https://github.com/docker/for-win/issues) -> - [docker/for-linux](https://github.com/docker/for-linux/issues) +> If you do not find a solution in troubleshooting, browse the GitHub repositories or create a new issue on the [Docker Desktop issue tracker](https://github.com/docker/desktop-feedback). ### Diagnose from the app 1. From **Troubleshoot**, select **Get support**. This opens the in-app Support page and starts collecting the diagnostics. + > [!NOTE] + > + > Gathering diagnostics may take several minutes. Don't close Docker Desktop while the diagnostics are being collected. 2. 
When the diagnostics collection process is complete, select **Upload to get a Diagnostic ID**. 3. When the diagnostics are uploaded, Docker Desktop prints a diagnostic ID. Copy this ID. 4. Use your diagnostics ID to get help: - - If you have a paid Docker subscription, select **Contact support**. This opens the Docker Desktop support form. Fill in the information required and add the ID you copied in step three to the **Diagnostics ID field**. Then, select **Submit ticket** to request Docker Desktop support. - > [!NOTE] - > - > You must be signed in to Docker Desktop to access the support form. For information on what's covered as part of Docker Desktop support, see [Support](/manuals/desktop/troubleshoot-and-support/support.md). - - If you don't have a paid Docker subscription, select **Report a Bug** to open a new Docker Desktop issue on GitHub. Complete the information required and ensure you add the diagnostic ID you copied in step three. + - If you have a paid Docker subscription, select **Contact support**. This opens the Docker Desktop support form. Fill in the information required and add the ID you copied in step three to the **Diagnostics ID field**. Then, select **Submit ticket** to request Docker Desktop support. + > [!NOTE] + > + > You must be signed in to Docker Desktop to access the support form. For information on what's covered as part of Docker Desktop support, see [Support](/manuals/support/_index.md). + - If you don't have a paid Docker subscription, select **Report a Bug** to open a new Docker Desktop issue on GitHub. Complete the information required and ensure you add the diagnostic ID you copied in step three. -### Diagnose from an error message +### Diagnose from an error message 1. When an error message appears, select **Gather diagnostics**. + > [!NOTE] + > + > Gathering diagnostics may take several minutes. Don't close Docker Desktop while the diagnostics are being collected. 2. When the diagnostics are uploaded, Docker Desktop prints a diagnostic ID. Copy this ID. 3. Use your diagnostics ID to get help: - - If you have a paid Docker subscription, select **Contact support**. This opens the Docker Desktop support form. Fill in the information required and add the ID you copied in step three to the **Diagnostics ID field**. Then, select **Submit ticket** to request Docker Desktop support. - > [!NOTE] - > - > You must be signed in to Docker Desktop to access the support form. For information on what's covered as part of Docker Desktop support, see [Support](/manuals/desktop/troubleshoot-and-support/support.md). - - If you don't have a paid Docker subscription, you can open a new Docker Desktop issue on GitHub for [Mac](https://github.com/docker/for-mac/issues), [Windows](https://github.com/docker/for-win/issues), or [Linux](https://github.com/docker/for-linux/issues). Complete the information required and ensure you add the diagnostic ID printed in step two. + - If you have a paid Docker subscription, select **Contact support**. This opens the Docker Desktop support form. Fill in the information required and add the ID you copied in step three to the **Diagnostics ID field**. Then, select **Submit ticket** to request Docker Desktop support. + > [!NOTE] + > + > You must be signed in to Docker Desktop to access the support form. For information on what's covered as part of Docker Desktop support, see [Support](/manuals/support/_index.md). 
+ - If you don't have a paid Docker subscription, you can open a new [Docker Desktop issue on GitHub](https://github.com/docker/desktop-feedback). Complete the information required and ensure you add the diagnostic ID printed in step two. ### Diagnose from the terminal In some cases, it's useful to run the diagnostics yourself, for instance, if Docker Desktop cannot start. +> [!NOTE] +> +> Gathering diagnostics may take several minutes. Wait for the process to complete before closing the terminal. + {{< tabs group="os" >}} {{< tab name="Windows" >}} 1. Locate the `com.docker.diagnose` tool: ```console + # For all-user installations $ C:\Program Files\Docker\Docker\resources\com.docker.diagnose.exe + + # For per-user installations + $ %LOCALAPPDATA%\Programs\DockerDesktop\resources\com.docker.diagnose.exe ``` 2. Create and upload the diagnostics ID. In PowerShell, run: ```console + # For all-user installations $ & "C:\Program Files\Docker\Docker\resources\com.docker.diagnose.exe" gather -upload + + # For per-user installations + $ & "$env:LOCALAPPDATA\Programs\DockerDesktop\resources\com.docker.diagnose.exe" gather -upload ``` -After the diagnostics have finished, the terminal displays your diagnostics ID and the path to the diagnostics file. The diagnostics ID is composed of your user ID and a timestamp. For example `BE9AFAAF-F68B-41D0-9D12-84760E6B8740/20190905152051`. +After the diagnostics have finished, the terminal displays your diagnostics ID and the path to the diagnostics file. The diagnostics ID is composed of your user ID and a timestamp. For example `BE9AFAAF-F68B-41D0-9D12-84760E6B8740/20190905152051`. {{< /tab >}} {{< tab name="Mac" >}} @@ -117,7 +128,7 @@ After the diagnostics have finished, the terminal displays your diagnostics ID a $ /Applications/Docker.app/Contents/MacOS/com.docker.diagnose gather -upload ``` -After the diagnostics have finished, the terminal displays your diagnostics ID and the path to the diagnostics file. The diagnostics ID is composed of your user ID and a timestamp. For example `BE9AFAAF-F68B-41D0-9D12-84760E6B8740/20190905152051`. +After the diagnostics have finished, the terminal displays your diagnostics ID and the path to the diagnostics file. The diagnostics ID is composed of your user ID and a timestamp. For example `BE9AFAAF-F68B-41D0-9D12-84760E6B8740/20190905152051`. {{< /tab >}} {{< tab name="Linux" >}} @@ -134,11 +145,15 @@ After the diagnostics have finished, the terminal displays your diagnostics ID a $ /opt/docker-desktop/bin/com.docker.diagnose gather -upload ``` -After the diagnostics have finished, the terminal displays your diagnostics ID and the path to the diagnostics file. The diagnostics ID is composed of your user ID and a timestamp. For example `BE9AFAAF-F68B-41D0-9D12-84760E6B8740/20190905152051`. +After the diagnostics have finished, the terminal displays your diagnostics ID and the path to the diagnostics file. The diagnostics ID is composed of your user ID and a timestamp. For example `BE9AFAAF-F68B-41D0-9D12-84760E6B8740/20190905152051`. {{< /tab >}} {{< /tabs >}} +> [!TIP] +> +> You can also use the [`docker desktop diagnose` command](/manuals/desktop/features/desktop-cli.md) to diagnose Docker Desktop and upload the diagnostics ID.
+ To view the contents of the diagnostic file: {{< tabs group="os" >}} @@ -148,7 +163,7 @@ To view the contents of the diagnostic file: ```powershell $ Expand-Archive -LiteralPath "C:\Users\testUser\AppData\Local\Temp\5DE9978A-3848-429E-8776-950FC869186F\20230607101602.zip" -DestinationPath "C:\Users\testuser\AppData\Local\Temp\5DE9978A-3848-429E-8776-950FC869186F\20230607101602" - ``` + ``` 2. Open the file in your preferred text editor. Run: @@ -180,67 +195,14 @@ $ unzip –l /tmp/.zip #### Use your diagnostics ID to get help If you have a paid Docker subscription, select **Contact support**. This opens the Docker Desktop support form. Fill in the information required and add the ID you copied in step three to the **Diagnostics ID field**. Then, select **Submit ticket** to request Docker Desktop support. - -If you don't have a paid Docker subscription, create an issue on GitHub: -- [For Linux](https://github.com/docker/desktop-linux/issues) -- [For Mac](https://github.com/docker/for-mac/issues) -- [For Windows](https://github.com/docker/for-win/issues) +If you don't have a paid Docker subscription, create an issue on [GitHub](https://github.com/docker/desktop-feedback). ### Self-diagnose tool -Docker Desktop contains a self-diagnose tool which can help you identify some common problems. - -{{< tabs group="os" >}} -{{< tab name="Windows" >}} -1. Locate the `com.docker.diagnose` tool. - - ```console - $ C:\Program Files\Docker\Docker\resources\com.docker.diagnose.exe - ``` - -2. In PowerShell, run the self-diagnose tool: - - ```console - $ & "C:\Program Files\Docker\Docker\resources\com.docker.diagnose.exe" gather - ``` - -{{< /tab >}} -{{< tab name="Mac" >}} - -1. Locate the `com.docker.diagnose` tool. - - ```console - $ /Applications/Docker.app/Contents/MacOS/com.docker.diagnose - ``` - -2. Run the self-diagnose tool: - - ```console - $ /Applications/Docker.app/Contents/MacOS/com.docker.diagnose gather - ``` - -{{< /tab >}} -{{< tab name="Linux" >}} - -1. Locate the `com.docker.diagnose` tool. - -2. Run the self-diagnose tool: - - ```console - $ /opt/docker-desktop/bin/com.docker.diagnose gather - ``` - -{{< /tab >}} -{{< /tabs >}} - -The tool runs a suite of checks and displays **PASS** or **FAIL** next to each check. If there are any failures, it highlights the most relevant at the end of the report. - -You can then create an issue on GitHub: - -- [For Linux](https://github.com/docker/desktop-linux/issues) -- [For Mac](https://github.com/docker/for-mac/issues) -- [For Windows](https://github.com/docker/for-win/issues) +> [!IMPORTANT] +> +> This tool has been deprecated. ## Check the logs @@ -314,3 +276,5 @@ to learn how to view the Docker Daemon logs. - View specific [troubleshoot topics](topics.md). 
- View information on [known issues](known-issues.md) +- [Fix "Docker.app is damaged" on macOS](mac-damaged-dialog.md) - Resolve macOS installation issues +- [Get support for Docker products](/manuals/support/_index.md) diff --git a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md index d4d6afc5638..91235e55c0d 100644 --- a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md +++ b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md @@ -3,14 +3,16 @@ description: Find known issues for Docker Desktop keywords: mac, troubleshooting, known issues, Docker Desktop title: Known issues tags: [ Troubleshooting ] -weight: 30 +weight: 20 aliases: - /desktop/troubleshoot/known-issues/ --- {{< tabs >}} {{< tab name="For Mac with Intel chip" >}} -- The Mac Activity Monitor reports that Docker is using twice the amount of memory it's actually using. This is due to a [bug in macOS].(https://docs.google.com/document/d/17ZiQC1Tp9iH320K-uqVLyiJmk4DHJ3c4zgQetJiKYQM/edit?usp=sharing) on this. +- The Mac Activity Monitor reports that Docker is using twice the amount of memory it's actually using. This is due to a [bug in macOS](https://docs.google.com/document/d/17ZiQC1Tp9iH320K-uqVLyiJmk4DHJ3c4zgQetJiKYQM/edit?usp=sharing). + +- **"Docker.app is damaged" dialog**: If you see a "Docker.app is damaged and can't be opened" dialog during installation or updates, this is typically caused by non-atomic copy operations when other applications are using the Docker CLI. See [Fix "Docker.app is damaged" on macOS](mac-damaged-dialog.md) for resolution steps. - Force-ejecting the `.dmg` after running `Docker.app` from it can cause the whale icon to become unresponsive, Docker tasks to show as not responding in the Activity Monitor, and for some processes to consume a large amount of CPU resources. Reboot and restart Docker to resolve these issues. diff --git a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/mac-damaged-dialog.md b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/mac-damaged-dialog.md new file mode 100644 index 00000000000..68586e88240 --- /dev/null +++ b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/mac-damaged-dialog.md @@ -0,0 +1,72 @@ +--- +description: Fix "Docker.app is damaged and can't be opened. You should move it to the Trash" dialog on macOS +keywords: docker desktop mac, damaged app, move to trash, gatekeeper, installation issues, troubleshooting +title: Fix "Docker.app is damaged and can't be opened" on macOS +linkTitle: MacOS app damaged dialog +tags: [Troubleshooting] +weight: 30 +--- + +## Error message + +macOS shows the following dialog when you try to open Docker Desktop: + +```text +Docker.app is damaged and can't be opened. You should move it to the Trash. +``` + +This error prevents Docker Desktop from launching and can occur during installation or after updates. + +## Possible cause + +This issue occurs due to a non-atomic copy during a drag/drop installation. When you drag and drop `Docker.app` from a DMG file while another application, like VS Code, is invoking the Docker CLI through symlinks, the copy operation may be interrupted, leaving the app in a partially copied state that Gatekeeper marks as "damaged". 
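If you want to confirm that Gatekeeper is rejecting an incomplete copy rather than something else, you can inspect the copied bundle directly. This is an optional check and not part of the documented recovery flow; the exact output wording varies across macOS versions:

```console
# Verify the app bundle's code signature. An interrupted copy typically
# fails with an error such as "a sealed resource is missing or invalid".
$ codesign --verify --deep --strict --verbose=2 /Applications/Docker.app

# Ask Gatekeeper whether it would allow the app to run.
$ spctl --assess --type execute --verbose /Applications/Docker.app
```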
+ +## Solution + +Follow these steps to resolve the issue: + +### Step one: Quit third-party software + +Close any applications that might call Docker in the background: + +- Visual Studio Code and other IDEs +- Terminal applications +- Agent apps or development tools +- Any scripts or processes that use the Docker CLI + +### Step two: Remove any partial installation + +1. Move `/Applications/Docker.app` to Trash and empty Trash. +2. If you used a DMG installer, eject and re-mount the Docker DMG. + +### Step three: Reinstall Docker Desktop + +Follow the instructions in the [macOS installation guide](/manuals/desktop/setup/install/mac-install.md) to reinstall Docker Desktop. + +### If the dialog persists + +If you continue to see the "damaged" dialog after following the recovery steps: + +1. Gather diagnostics using the terminal. Follow the instructions in [Diagnose from the terminal](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md#diagnose-from-the-terminal). + + - Note down your diagnostics ID displayed in the terminal after running diagnostics. + +2. Get help: + - If you have a paid Docker subscription, [contact support](/manuals/support/_index.md) and include your diagnostics ID + - For community users, [open an issue on GitHub](https://github.com/docker/desktop-feedback) and include your diagnostics ID + +## Prevention + +To avoid this issue in the future: + +- If your organization allows, update Docker Desktop via the in-app update flow +- Always quit applications that use Docker before installing Docker Desktop via the DMG installer drag-and-drop approach +- In managed environments, use PKG installations over DMG drag-and-drop +- Keep installer volumes mounted until installation is complete + +## Related information + +- [Install Docker Desktop on Mac](/manuals/desktop/setup/install/mac-install.md) +- [PKG installer documentation](/manuals/enterprise/enterprise-deployment/pkg-install-and-configure.md) +- [Troubleshoot Docker Desktop](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md) +- [Known issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md) diff --git a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md index a234191093c..78ff6c96ff3 100644 --- a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md +++ b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md @@ -13,11 +13,7 @@ aliases: > [!TIP] > -> If you do not find a solution in troubleshooting, browse the GitHub repositories or create a new issue: -> -> - [docker/for-mac](https://github.com/docker/for-mac/issues) -> - [docker/for-win](https://github.com/docker/for-win/issues) -> - [docker/for-linux](https://github.com/docker/for-linux/issues) +> If you do not find a solution in troubleshooting, browse the GitHub repositories or [create a new issue](https://github.com/docker/desktop-feedback). ## Topics for all platforms @@ -59,7 +55,7 @@ Docker Desktop uses hardware-accelerated graphics by default, which may cause pr Disable hardware acceleration: -1. Edit Docker Desktop's `settings-store.json` file (or `settings.json` for Docker Desktop versions 4.34 and earlier). You can find this file at: +1. Edit Docker Desktop's `settings-store.json` file.
You can find this file at: - Mac: `~/Library/Group Containers/group.com.docker/settings-store.json` - Windows: `C:\Users\[USERNAME]\AppData\Roaming\Docker\settings-store.json` @@ -156,6 +152,23 @@ Ensure your username is short enough to keep paths within the allowed limit: ## Topics for Mac +### Upgrade requires administrator privileges + +#### Cause + +On macOS, users without administrator privileges cannot perform in-app upgrades from the Docker Desktop Dashboard. + +#### Solution + +> [!IMPORTANT] +> +> Do not uninstall the current version before upgrading. Doing so deletes all local Docker containers, images, and volumes. + +To upgrade Docker Desktop: + +- Ask an administrator to install the newer version over the existing one. +- Use the [`--user` install flag](/manuals/desktop/setup/install/mac-install.md#security-and-access) if appropriate for your setup. + ### Persistent notification telling me an application has changed my Desktop configurations #### Cause @@ -203,24 +216,6 @@ See also, [Hypervisor Framework Reference](https://developer.apple.com/library/mac/documentation/DriversKernelHardware/Reference/Hypervisor/) in the Apple documentation, and Docker Desktop [Mac system requirements](/manuals/desktop/setup/install/mac-install.md#system-requirements). -### VPNKit keeps breaking - -#### Cause - -In Docker Desktop version 4.19, gVisor replaced VPNKit to enhance the performance of VM networking when using the Virtualization framework on macOS 13 and later. - -#### Solution - -To continue using VPNKit: - -1. Open your `settings-store.json` file located at `~/Library/Group Containers/group.com.docker/settings-store.json` -2. Add: - - ```JSON - $ "networkType":"vpnkit" - ``` -3. Save the file and restart Docker Desktop. - ## Topics for Windows ### Docker Desktop fails to start when anti-virus software is installed @@ -228,7 +223,7 @@ To continue using VPNKit: #### Cause Some anti-virus software may be incompatible with Hyper-V and Microsoft -Windows 10 builds. The conflict +Windows builds. The conflict typically occurs after a Windows update and manifests as an error response from the Docker daemon and a Docker Desktop start failure. @@ -445,6 +440,38 @@ The Virtual Machine Management Service failed to start the virtual machine 'Dock Try [enabling nested virtualization](/manuals/desktop/setup/vm-vdi.md#turn-on-nested-virtualization). +### Docker Desktop with Windows Containers fails with "The media is write protected" + +#### Error message + +`FSCTL_EXTEND_VOLUME \\?\Volume{GUID}: The media is write protected` + +#### Cause + +If you're encountering failures when running Docker Desktop with Windows Containers, it might be due to +a specific Windows configuration policy: FDVDenyWriteAccess. + +This policy, when enabled, causes Windows to mount all fixed drives not encrypted by BitLocker as read-only. +This also affects virtual machine volumes and as a result, Docker Desktop may not be able to start or run containers +correctly because it requires read-write access to these volumes. + +FDVDenyWriteAccess is a Windows Group Policy setting that, when enabled, prevents write access to fixed data drives that are not protected +by BitLocker. This is often used in security-conscious environments but can interfere with development tools like Docker. +In the Windows registry it can be found at `HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Policies\Microsoft\FVE\FDVDenyWriteAccess`.
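To check whether this policy is active on a given machine, you can query the registry value noted above from PowerShell. This is a quick sketch for diagnosis only; the value may not exist at all if the policy has never been configured:

```console
# Run in PowerShell. A value of 1 means write access to fixed drives not
# protected by BitLocker is denied; 0 or a missing value means it is not enforced.
$ Get-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Policies\Microsoft\FVE" -Name FDVDenyWriteAccess -ErrorAction SilentlyContinue
```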
+ +#### Solutions + +Docker Desktop does not support running Windows Containers on systems where FDVDenyWriteAccess is enabled. This setting interferes with the +ability of Docker to mount volumes correctly, which is critical for container functionality. + +To use Docker Desktop with Windows Containers, ensure that FDVDenyWriteAccess is disabled. You can check and change this setting in the registry or through Group Policy Editor (`gpedit.msc`) under: + +**Computer Configuration** > **Administrative Templates** > **Windows Components** > **BitLocker Drive Encryption** > **Fixed Data Drives** > **Deny write access to fixed drives not protected by BitLocker** + +> [!NOTE] +> +> Modifying Group Policy settings may require administrator privileges and should comply with your organization's IT policies. If the setting gets reset after some time this usually means that it was overridden by the centralized configuration of your IT department. Talk to them before making any changes. + ### `Docker Desktop Access Denied` error message when starting Docker Desktop #### Error message diff --git a/content/manuals/desktop/uninstall.md b/content/manuals/desktop/uninstall.md index 921bdf2f172..b3520295a40 100644 --- a/content/manuals/desktop/uninstall.md +++ b/content/manuals/desktop/uninstall.md @@ -10,7 +10,7 @@ weight: 210 > > Uninstalling Docker Desktop destroys Docker containers, images, volumes, and > other Docker-related data local to the machine, and removes the files generated -> by the application. To learn how to preserve important data before uninstalling, refer to the [back up and restore data](/manuals/desktop/settings-and-maintenance/backup-and-restore.md) section. +> by the application. To preserve important data before uninstalling, refer to the [back up and restore data](/manuals/desktop/settings-and-maintenance/backup-and-restore.md) section. {{< tabs >}} {{< tab name="Windows" >}} @@ -25,7 +25,11 @@ weight: 210 1. Locate the installer: ```console + # all-user installation $ C:\Program Files\Docker\Docker\Docker Desktop Installer.exe + + # per-user installation (Beta) + $ %LOCALAPPDATA%\Programs\DockerDesktop\Docker Desktop Installer.exe ``` 2. Uninstall Docker Desktop. - In PowerShell, run: @@ -91,13 +95,6 @@ $ rm -rf ~/Library/Group\ Containers/group.com.docker $ rm -rf ~/.docker ``` -With Docker Desktop version 4.36 and earlier, the following files may also be left on the file system. You can remove these with administrative privileges: - -```console -/Library/PrivilegedHelperTools/com.docker.vmnetd -/Library/PrivilegedHelperTools/com.docker.socket -``` - {{< /tab >}} {{< tab name="Ubuntu" >}} @@ -170,7 +167,7 @@ To uninstall Docker Desktop for Fedora: ```console $ rm -r $HOME/.docker/desktop $ sudo rm /usr/local/bin/com.docker.cli - $ sudo apt purge docker-desktop + $ sudo dnf remove docker-desktop ``` This removes configuration and data files at `$HOME/.docker/desktop`, the symlink at `/usr/local/bin/com.docker.cli`, and purges the remaining systemd service files. @@ -187,20 +184,18 @@ To uninstall Docker Desktop for Arch: 1. Remove the Docker Desktop application. Run: ```console - $ sudo pacman remove docker-desktop + $ sudo pacman -Rns docker-desktop ``` - This removes the Docker Desktop package itself but doesn’t delete all of its files or settings. + This removes the Docker Desktop package along with its configuration files and dependencies not required by other packages. -2. Manually remove leftover file. +2. Manually remove leftover files. 
```console $ rm -r $HOME/.docker/desktop - $ sudo rm /usr/local/bin/com.docker.cli - $ sudo apt purge docker-desktop ``` - This removes configuration and data files at `$HOME/.docker/desktop`, the symlink at `/usr/local/bin/com.docker.cli`, and purges the remaining systemd service files. + This removes configuration and data files at `$HOME/.docker/desktop`. 3. Clean up Docker config settings. In `$HOME/.docker/config.json`, remove the `credsStore` and `currentContext` properties. diff --git a/content/manuals/desktop/use-desktop/_index.md b/content/manuals/desktop/use-desktop/_index.md index b12be1c3ef6..c1d3dd455c8 100644 --- a/content/manuals/desktop/use-desktop/_index.md +++ b/content/manuals/desktop/use-desktop/_index.md @@ -10,13 +10,13 @@ aliases: When you open Docker Desktop, the Docker Desktop Dashboard displays. -![Docker Desktop Dashboard on Containers view](../images/dashboard.png) +![Docker Desktop Dashboard on Containers view](../images/dashboard.webp) -It provides a centralized interface to manage your [containers](container.md), [images](images.md), [volumes](volumes.md), and [builds](builds.md). +It provides a centralized interface to manage your [containers](container.md), [images](images.md), [volumes](volumes.md), [builds](builds.md), [Kubernetes resources](kubernetes.md), and [logs](logs.md). In addition, the Docker Desktop Dashboard lets you: -- Use [Ask Gordon](/manuals/ai/gordon/_index.md), a personal AI assistant embedded in Docker Desktop and the Docker CLI. It's designed to streamline your workflow and help you make the most of the Docker ecosystem. +- Use [Gordon](/manuals/ai/gordon/_index.md), a personal AI assistant embedded in Docker Desktop and the Docker CLI. It's designed to streamline your workflow and help you make the most of the Docker ecosystem. - Navigate to the **Settings** menu to configure your Docker Desktop settings. Select the **Settings** icon in the Dashboard header. - Access the **Troubleshoot** menu to debug and perform restart operations. Select the **Troubleshoot** icon in the Dashboard header. - Be notified of new releases, installation progress updates, and more in the **Notifications center**. Select the bell icon in the bottom-right corner of the Docker Desktop Dashboard to access the notification center. @@ -28,6 +28,10 @@ In addition, the Docker Desktop Dashboard lets you: - Get to the [Docker Scout](../../scout/_index.md) dashboard. - Navigate to [Docker Extensions](/manuals/extensions/_index.md). +> [!TIP] +> +> You can customize the left-hand navigation to show only the tabs that matter to you, and hide the ones that don’t. Right-click the left-hand navigation, select **Customize**, and then select, deselect, or re-order the tabs. + ## Docker terminal From the Docker Dashboard footer, you can use the integrated terminal directly within Docker Desktop. diff --git a/content/manuals/desktop/use-desktop/builds.md b/content/manuals/desktop/use-desktop/builds.md index c31b1e61140..5a47b1332bb 100644 --- a/content/manuals/desktop/use-desktop/builds.md +++ b/content/manuals/desktop/use-desktop/builds.md @@ -16,9 +16,7 @@ connected to the same cloud builder. > [!NOTE] > -> When building Windows container images using the `docker build` command, the legacy builder is used which does not populate the **Builds** view. 
To switch to using BuildKit, you can either: -> - Set `DOCKER_BUILDKIT=1` in the build command, such as `DOCKER_BUILDKIT=1 docker build .` or -> - Use the `docker buildx build` command +> Windows container image builds use the legacy builder and do not appear in the **Builds** view. Only BuildKit-powered builds are shown here. ## Show build list @@ -37,8 +35,6 @@ Docker Desktop settings. ### Import builds -{{< summary-bar feature_name="Import builds" >}} - The **Import builds** button lets you import build records for builds by other people, or builds in a CI environment. When you've imported a build record, it gives you full access to the logs, traces, and other data for that build, diff --git a/content/manuals/desktop/use-desktop/container.md b/content/manuals/desktop/use-desktop/container.md index 2a4ff86a54e..1017939ebce 100644 --- a/content/manuals/desktop/use-desktop/container.md +++ b/content/manuals/desktop/use-desktop/container.md @@ -66,11 +66,11 @@ Using the **Exec** tab is the same as running one of the following commands: - `docker exec -it /bin/sh` - `docker exec -it cmd.exe` when accessing Windows containers -For more details, see the [`docker exec` CLI reference](/reference/cli/docker/exec/). +For more details, see the [`docker exec` CLI reference](/reference/cli/docker/container/exec/). If you have enabled Docker Debug in settings, or toggled on **Debug mode** to the right of the tab options, the **Debug** tab displays. -Debug mode requires a [Pro, Team, or Business subscription](/subscription/details/). Debug mode has several advantages, such as: +Debug mode has several advantages, such as: - A customizable toolbox. The toolbox comes with many standard Linux tools pre-installed, such as `vim`, `nano`, `htop`, and `curl`. For more details, see the [`docker debug` CLI reference](/reference/cli/docker/debug/). @@ -79,13 +79,9 @@ Debug mode requires a [Pro, Team, or Business subscription](/subscription/detail To use debug mode: -1. Sign in to Docker Desktop with an account that has a Pro, Team, or Business - subscription. -2. After you're signed in, either: - - - Hover over your running container and under the **Actions** column, select the **Show container actions** - menu. From the drop-down menu, select **Use Docker Debug**. - - Or, select the container and then select the **Debug** tab. +- Hover over your running container and under the **Actions** column, select the **Show container actions** +menu. From the drop-down menu, select **Use Docker Debug**. +- Or, select the container and then select the **Debug** tab. To use debug mode by default, navigate to the **General** tab in **Settings** and select the **Enable Docker Debug by diff --git a/content/manuals/desktop/use-desktop/kubernetes.md b/content/manuals/desktop/use-desktop/kubernetes.md new file mode 100644 index 00000000000..7f6c28052ed --- /dev/null +++ b/content/manuals/desktop/use-desktop/kubernetes.md @@ -0,0 +1,217 @@ +--- +description: See how you can deploy to Kubernetes on Docker Desktop +keywords: deploy, kubernetes, kubectl, orchestration, Docker Desktop +title: Explore the Kubernetes view +linkTitle: Kubernetes +aliases: +- /docker-for-windows/kubernetes/ +- /docker-for-mac/kubernetes/ +- /desktop/kubernetes/ +- /desktop/features/kubernetes/ +weight: 50 +--- + +Docker Desktop includes a standalone Kubernetes server and client, as well as Docker CLI integration, enabling local Kubernetes development and testing directly on your machine. 
+ +The Kubernetes server runs as a single or multi-node cluster, within Docker containers. This lightweight setup helps you explore Kubernetes features, test workloads, and work with container orchestration in parallel with other Docker features. + +## Enable Kubernetes + +With Docker Desktop version 4.51 and later, you can manage Kubernetes directly from the **Kubernetes** view in the Docker Desktop Dashboard. + +1. Open the Docker Desktop Dashboard and select the **Kubernetes** view. +2. Select **Create cluster**. +3. Choose your cluster type: + - **Kubeadm** creates a single-node cluster and the version is set by Docker Desktop. + - **kind** creates a multi-node cluster and you can set the version and number of nodes. + For more detailed information on each cluster type, see [Cluster provisioning method](#cluster-provisioning-method). +4. Optional: Select **Show system containers (advanced)** to view internal containers when using Docker commands. +5. Select **Create**. + +This sets up the images required to run the Kubernetes server as containers, and installs the `kubectl` command-line tool on your system at `/usr/local/bin/kubectl` (Mac) or `C:\Program Files\Docker\Docker\resources\bin\kubectl.exe`(all-user installations) or `%LOCALAPPDATA%\Programs\DockerDesktop\resources\bin\kubectl.exe` (per-user installations) (Windows). If you installed `kubectl` using Homebrew, or by some other method, and experience conflicts, remove `/usr/local/bin/kubectl`. + + > [!NOTE] + > + > Docker Desktop for Linux does not include `kubectl` by default. You can install it separately by following the [Kubernetes installation guide](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/). Ensure the `kubectl` binary is installed at `/usr/local/bin/kubectl`. + +The following actions are also triggered in the Docker Desktop backend and VM: + +- Generation of certificates and cluster configuration +- Download and installation of Kubernetes internal components +- Cluster boot-up +- Installation of additional controllers for networking and storage + +When Kubernetes is enabled, its status is displayed in the Docker Desktop Dashboard footer and the Docker menu. + +You can check which version of Kubernetes you're on with: + +```console +$ kubectl version +``` + +### Cluster provisioning method + +Docker Desktop Kubernetes can be provisioned with either the `kubeadm` or `kind` +provisioners. + +`kubeadm` is the older provisioner. It supports a single-node cluster, you can't select the kubernetes +version, it's slower to provision than `kind`, and it's not supported by [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/index.md) (ECI), +meaning that if ECI is enabled the cluster works but it's not protected by ECI. + +`kind` is the newer provisioner. It supports multi-node clusters (for +a more realistic Kubernetes setup), you can choose the Kubernetes version, it's +faster to provision than `kubeadm`, and it's supported by ECI - when ECI is +enabled, the Kubernetes cluster runs in unprivileged Docker containers, thus +making it more secure. 
+ +| Feature | `kubeadm` | `kind` | +| :------ | :-----: | :--: | +| Multi-node cluster support | No | Yes | +| Kubernetes version selector | No | Yes | +| Speed to provision | ~1 min | ~30 seconds | +| Supported by ECI | No | Yes | +| Works with containerd image store | Yes | Yes | +| Works with Docker image store | Yes | No | + +## Dashboard view + +When a Kubernetes cluster is enabled, the **Kubernetes** view displays a live dashboard view showing: + +- A namespace selector at the top +- A real-time list of resources - pods, services, deployments - in the selected namespace +- Automatic updates when resources are created, deleted, or modified + +## Verify installation + +Confirm that your cluster is running: + +```console +$ kubectl get nodes +NAME STATUS ROLES AGE VERSION +docker-desktop Ready control-plane 3h v1.29.1 +``` + +If kubectl is pointing to another environment, switch to the Docker Desktop context: + +```console +$ kubectl config use-context docker-desktop +``` + +>[!TIP] +> +> If no contexts appear, try: +> +> - Running the command in the Command Prompt or PowerShell. +> - Setting the `KUBECONFIG` environment variable to point to your `.kube/config` file. + + +For more information about `kubectl`, see the +[`kubectl` documentation](https://kubernetes.io/docs/reference/kubectl/overview/). + +## Edit or stop your cluster + +When Kubernetes is enabled: + +- Select **Edit cluster** to modify configuration. For example, switch between **kubeadm** and **kind**, or change the number of nodes. +- Select **Stop** to disable the cluster. Progress is displayed, and the **Kubernetes** view returns to the **Create cluster** screen. This stops and removes Kubernetes containers, and also removes the `/usr/local/bin/kubectl` command. + +## Upgrade your cluster + +Kubernetes clusters are not automatically upgraded with Docker Desktop updates. To upgrade the cluster, you must manually select **Reset cluster** in the **Kubernetes** settings. + +## Configuring a custom image registry for Kubernetes control plane images + +Docker Desktop uses containers to run the Kubernetes control plane. By default, Docker Desktop pulls +the associated container images from Docker Hub. The images pulled depend on the [cluster provisioning mode](#cluster-provisioning-method). + +For example, in `kind` mode it requires the following images: + +```console +docker.io/kindest/node: +docker.io/envoyproxy/envoy: +docker.io/docker/desktop-cloud-provider-kind: +docker.io/docker/desktop-containerd-registry-mirror: +``` + +In `kubeadm` mode it requires the following images: + +```console +docker.io/docker/desktop-kubernetes: +docker.io/docker/desktop-storage-provisioner: +docker.io/docker/desktop-vpnkit-controller: +docker.io/docker/desktop-kubernetes-etcd: +docker.io/docker/desktop-kubernetes-coredns: +docker.io/docker/desktop-kubernetes-pause: +docker.io/docker/desktop-kubernetes-apiserver: +docker.io/docker/desktop-kubernetes-controller-manager: +docker.io/docker/desktop-kubernetes-scheduler: +docker.io/docker/desktop-kubernetes-proxy: +``` + +The image tags are automatically selected by Docker Desktop based on several +factors, including the version of Kubernetes being used. The tags vary for each image and may change between Docker Desktop releases. To stay informed, monitor the Docker Desktop release notes. + +> [!NOTE] +> +> In Docker Desktop versions 4.44 or later you can run `docker desktop kubernetes images list` to list Kubernetes images used by the currently installed version of Docker Desktop. 
+> For more information, see the [Docker Desktop CLI](/reference/cli/docker/desktop/kubernetes/images). + +To accommodate scenarios where access to Docker Hub is not allowed, admins can +configure Docker Desktop to pull the above listed images from a different registry (e.g., a mirror) +using the [KubernetesImagesRepository](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md#kubernetes) setting as follows. + +An image name can be broken into `[registry[:port]/][namespace/]repository[:tag]` components. +The `KubernetesImagesRepository` setting allows users to override the `[registry[:port]/][namespace]` +portion of the image's name. + +For example, if Docker Desktop Kubernetes is configured in `kind` mode and +`KubernetesImagesRepository` is set to `my-registry:5000/kind-images`, then +Docker Desktop will pull the images from: + +```console +my-registry:5000/kind-images/node: +my-registry:5000/kind-images/envoy: +my-registry:5000/kind-images/desktop-cloud-provider-kind: +my-registry:5000/kind-images/desktop-containerd-registry-mirror: +``` + +These images should be cloned/mirrored from their respective images in Docker Hub. The tags must +also match what Docker Desktop expects. + +The recommended approach to set this up is the following: + +1. Start Kubernetes using the desired cluster provisioning method: `kubeadm` or `kind`. +2. After Kubernetes has started, use either: + - (Docker Desktop version 4.44 or later) `docker desktop kubernetes images list` to list the image tags that will be pulled by the current Docker Desktop installation + - `docker ps` to view the container images used by Docker Desktop for the Kubernetes control plane +3. Clone or mirror those images (with matching tags) to your custom registry. +4. Stop the Kubernetes cluster. +5. Configure the `KubernetesImagesRepository` setting to point to your custom registry. +6. Restart Docker Desktop. +7. Verify that the Kubernetes cluster is using the custom registry images using the `docker ps` command. + +> [!NOTE] +> +> The `KubernetesImagesRepository` setting only applies to control plane images used by Docker Desktop +> to set up the Kubernetes cluster. It has no effect on other Kubernetes pods. + +> [!NOTE] +> +> In Docker Desktop versions 4.43 or earlier, when using `KubernetesImagesRepository` and [Enhanced Container Isolation (ECI)](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) +> is enabled, add the following images to the [ECI Docker socket mount image list](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md#enhanced-container-isolation): +> +> `[imagesRepository]/desktop-cloud-provider-kind:` +> `[imagesRepository]/desktop-containerd-registry-mirror:` +> +> These containers mount the Docker socket, so you must add the images to the ECI images list. If not, +> ECI will block the mount and Kubernetes won't start. + +## Troubleshooting + +- If Kubernetes fails to start, make sure Docker Desktop is running with enough allocated resources. Check **Settings** > **Resources**. +- If the `kubectl` commands return errors, confirm the context is set to `docker-desktop` + ```console + $ kubectl config use-context docker-desktop + ``` + You can then try checking the logs of the Kubernetes system containers if you have enabled that setting. +- If you're experiencing cluster issues after updating, reset your Kubernetes cluster. 
Resetting a Kubernetes cluster can help resolve issues by essentially reverting the cluster to a clean state, and clearing out misconfigurations, corrupted data, or stuck resources that may be causing problems. If the issue still persists, you may need to clean and purge data, and then restart Docker Desktop. diff --git a/content/manuals/desktop/use-desktop/logs.md b/content/manuals/desktop/use-desktop/logs.md new file mode 100644 index 00000000000..fcd2127ea28 --- /dev/null +++ b/content/manuals/desktop/use-desktop/logs.md @@ -0,0 +1,50 @@ +--- +description: Understand what you can do with the Logs view on Docker Dashboard +keywords: Docker Dashboard, manage, logs, gui, build logs, container logs, debugging, dashboard +title: Explore the Logs view in Docker Desktop +linkTitle: Logs +weight: 60 +--- + +{{< summary-bar feature_name="Desktop logs" >}} + +The **Logs** view provides a unified, real-time log stream from all running containers and Kubernetes nodes in Docker Desktop. Unlike the logs accessible from the [**Containers** view](container.md), the **Logs** view lets you monitor and search log output across your entire environment from a single interface. + +## Log entries + +Each log entry in the table view shows: + +| Column | Description | +| ------------- | ------------------------------------------------------------------------------ | +| **Timestamp** | The date and time the log line was emitted, for example `2026-02-26 11:18:53`. | +| **Object** | The container or node that produced the log line. | +| **Message** | The full log message, including any status codes such as `[ OK ]`. | + +Selecting the expand arrow to the left of a row reveals the full message for that entry. + +## Search and filter logs + +Use the **Search** field at the top of the Logs view to find specific entries. The search bar supports: + +- Plain-text terms for exact match searches +- Regular expressions (for example, `/error|warn/`) + +You can save your search terms for easy-access later. + +To refine the log stream further, select the **Filter** icon in the toolbar to open the container filter panel. From here you can: + +- Check individual containers to show only their output +- Check Compose stacks to show or hide entire groups +- Use **Select all** or **Clear all** to quickly toggle every container at once + +## Display settings + +Select the **Display settings** icon in the toolbar to toggle the following: + +- **View build logs**: Include or exclude build-related log output in the stream +- **Wrap lines** +- **Show timestamps** + +## Feedback + +Select **Give feedback** at the top of the view to share suggestions or report issues. diff --git a/content/manuals/desktop/use-desktop/pause.md b/content/manuals/desktop/use-desktop/pause.md index e7b882097f0..14265255c55 100644 --- a/content/manuals/desktop/use-desktop/pause.md +++ b/content/manuals/desktop/use-desktop/pause.md @@ -2,7 +2,7 @@ description: understand what pausing Docker Desktop Dashboard means keywords: Docker Desktop Dashboard, manage, containers, gui, dashboard, pause, user manual title: Pause Docker Desktop -weight: 60 +weight: 80 --- Pausing Docker Desktop temporarily suspends the Linux VM running Docker Engine. This saves the current state of all containers in memory and freezes all running processes, significantly reducing CPU and memory usage which is helpful for conserving battery on laptops. 
diff --git a/content/manuals/desktop/use-desktop/resource-saver.md b/content/manuals/desktop/use-desktop/resource-saver.md index 10162288b35..746db0ab3fa 100644 --- a/content/manuals/desktop/use-desktop/resource-saver.md +++ b/content/manuals/desktop/use-desktop/resource-saver.md @@ -3,7 +3,7 @@ description: Understand what Docker Desktop Resource Saver mode is and how to co keywords: Docker Dashboard, resource saver, manage, containers, gui, dashboard, user manual title: Docker Desktop's Resource Saver mode linkTitle: Resource Saver mode -weight: 50 +weight: 70 --- Resource Saver mode significantly reduces Docker @@ -20,7 +20,7 @@ experience. Resource Saver is enabled by default but can be disabled by navigating to the **Resources** tab, in **Settings**. You can also configure the idle timer as shown below. -![Resource Saver Settings](../images/resource-saver-settings.png) +![Resource Saver Settings](../images/resource-saver-settings.webp) If the values available aren't sufficient for your needs, you can reconfigure it to any value, as long as the value is larger than 30 seconds, by @@ -33,13 +33,9 @@ changing `autoPauseTimeoutSeconds` in the Docker Desktop `settings-store.json` f There's no need to restart Docker Desktop after reconfiguring. When Docker Desktop enters Resource Saver mode: -- A leaf icon displays on the +- A moon icon displays on the Docker Desktop status bar as well as on the Docker icon in -the system tray. The following image shows the Linux VM CPU and memory utilization reduced -to zero when Resource Saver mode is on. - - ![Resource Saver Status Bar](../images/resource-saver-status-bar.png) - +the system tray. - Docker commands that don't run containers, for example listing container images or volumes, don't necessarily trigger an exit from Resource Saver mode as Docker Desktop can serve such commands without unnecessarily waking up the Linux VM. > [!NOTE] diff --git a/content/manuals/desktop/use-desktop/volumes.md b/content/manuals/desktop/use-desktop/volumes.md index 1486ff4f8db..6b3cafd7eec 100644 --- a/content/manuals/desktop/use-desktop/volumes.md +++ b/content/manuals/desktop/use-desktop/volumes.md @@ -146,13 +146,13 @@ You can either [export a volume now](#export-a-volume-now) or [schedule a recurr {{< /tab >}} {{< tab name="External cloud storage" >}} - You must have a [Docker Business subscription](../../subscription/details.md) to export to an external cloud provider. + You must have a [Docker Business subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopVolumes) to export to an external cloud provider. Select your cloud provider and then specify the URL to upload to the storage. Refer to the following documentation for your cloud provider to learn how to obtain a URL. 
- - Amazon Web Services: [Create a presigned URL for Amazon S3 using an AWS SDK](https://docs.aws.amazon.com/AmazonS3/latest/userguide/example_s3_Scenario_PresignedUrl_section.html) + - Amazon Web Services: [Create a presigned URL of Amazon S3 using an AWS SDK](https://docs.aws.amazon.com/AmazonS3/latest/userguide/example_s3_Scenario_PresignedUrl_section.html) - Microsoft Azure: [Generate a SAS token and URL](https://learn.microsoft.com/en-us/azure/data-explorer/kusto/api/connection-strings/generate-sas-token) - Google Cloud: [Create a signed URL to upload an object](https://cloud.google.com/storage/docs/access-control/signing-urls-with-helpers#upload-object) @@ -163,7 +163,7 @@ You can either [export a volume now](#export-a-volume-now) or [schedule a recurr ### Schedule a volume export -1. Sign in to Docker Desktop. You must be signed in and have a paid [Docker subscription](../../subscription/details.md) to schedule a volume export. +1. Sign in to Docker Desktop. You must be signed in and have a paid [Docker subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopVolumes) to schedule a volume export. 2. In the **Volumes** view, select the volume you want to export. 3. Select the **Exports** tab. 4. Select **Schedule export**. @@ -192,13 +192,13 @@ You can either [export a volume now](#export-a-volume-now) or [schedule a recurr {{< /tab >}} {{< tab name="External cloud storage" >}} - You must have a [Docker Business subscription](../../subscription/details.md) to export to an external cloud provider. + You must have a [Docker Business subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsDesktopVolumes) to export to an external cloud provider. Select your cloud provider and then specify the URL to upload to the storage. Refer to the following documentation for your cloud provider to learn how to obtain a URL. - - Amazon Web Services: [Create a presigned URL for Amazon S3 using an AWS SDK](https://docs.aws.amazon.com/AmazonS3/latest/userguide/example_s3_Scenario_PresignedUrl_section.html) + - Amazon Web Services: [Create a presigned URL of Amazon S3 using an AWS SDK](https://docs.aws.amazon.com/AmazonS3/latest/userguide/example_s3_Scenario_PresignedUrl_section.html) - Microsoft Azure: [Generate a SAS token and URL](https://learn.microsoft.com/en-us/azure/data-explorer/kusto/api/connection-strings/generate-sas-token) - Google Cloud: [Create a signed URL to upload an object](https://cloud.google.com/storage/docs/access-control/signing-urls-with-helpers#upload-object) diff --git a/content/manuals/dhi/_index.md b/content/manuals/dhi/_index.md new file mode 100644 index 00000000000..130c77daabf --- /dev/null +++ b/content/manuals/dhi/_index.md @@ -0,0 +1,75 @@ +--- +title: Docker Hardened Images +description: Secure, minimal, and production-ready base images +weight: 8 +params: + sidebar: + group: Supply chain security + badge: + color: green + text: New + grid_sections: + - title: Quickstart + description: Follow a step-by-step guide to explore and run a Docker Hardened Image. + icon: rocket_launch + link: /dhi/get-started/ + - title: Explore + description: Learn what Docker Hardened Images are, how they're built, and what sets them apart from typical base images. + icon: info + link: /dhi/explore/ + - title: Features + description: Discover the security, compliance, and enterprise-readiness features built into Docker Hardened Images. 
+ icon: lock + link: /dhi/features/ + - title: How-tos + description: Step-by-step guides for using, verifying, scanning, and migrating to Docker Hardened Images. + icon: play_arrow + link: /dhi/how-to/ + - title: Core concepts + description: Understand the secure supply chain principles that make Docker Hardened Images production-ready. + icon: fact_check + link: /dhi/core-concepts/ + - title: Troubleshoot + description: Resolve common issues with building, running, or debugging Docker Hardened Images. + icon: help_center + link: /dhi/troubleshoot/ + - title: Additional resources + description: Guides, blog posts, Docker Hub catalog, GitHub repositories, and more. + icon: link + link: /dhi/resources/ +--- + +Docker Hardened Images (DHI) provide minimal, secure, and production-ready +container images, Helm charts, and system packages maintained by Docker. +Designed to reduce vulnerabilities and simplify compliance, DHI integrates +easily into your existing Docker-based workflows with little to no retooling +required. + +DHI is available in the following three subscriptions. + +| Feature | Community | Select | Enterprise | +|---|---|---|---| +| Hardened, minimal images | ✅ | ✅ | ✅ | +| Near-zero CVEs | ✅ | ✅ | ✅ | +| Verifiable SBOMs & SLSA Build L3 provenance | ✅ | ✅ | ✅ | +| Full, unsuppressed CVE visibility | ✅ | ✅ | ✅ | +| Drop-in adoption, no workflow changes | ✅ | ✅ | ✅ | +| Full catalog of open source images under Apache 2.0 | ✅ | ✅ | ✅ | +| Built with Docker Hardened System Packages | ✅ | ✅ | ✅ | +| Upstream cadence for Docker-released patches | ✅ | ✅ | ✅ | +| FIPS/STIG variants | ❌ | ✅ | ✅ | +| Critical CVE fixes < 7 days with SLA-backed continuous patching | ❌ | ✅ | ✅ | +| Customizations | ❌ | Up to 5 | Unlimited | +| Access to Hardened System Packages repository | ❌ | ❌ | ✅ | +| Full catalog access available | ❌ | ❌ | ✅ | +| Extended Lifecycle Support add-on available | ❌ | ❌ | ✅

Includes:<br>✅ +5 years of hardened updates<br>✅ Maintains security updates after upstream EOL<br>✅ SBOMs & provenance<br>
✅ Protects long-lived workloads | + +For pricing and more details, see the [Docker Hardened Images subscription +comparison](https://www.docker.com/products/hardened-images/#compare). + +Explore the sections below to get started with Docker Hardened Images, integrate +them into your workflow, and learn what makes them secure and enterprise-ready. + +{{< grid + items="grid_sections" +>}} diff --git a/content/manuals/dhi/core-concepts/_index.md b/content/manuals/dhi/core-concepts/_index.md new file mode 100644 index 00000000000..2d02ecafc05 --- /dev/null +++ b/content/manuals/dhi/core-concepts/_index.md @@ -0,0 +1,112 @@ +--- +title: Core concepts +description: Learn the core concepts behind Docker Hardened Images, including security metadata, vulnerability management, image structure, and verification. +weight: 30 +params: + grid_concepts_metadata: + - title: Attestations + description: Review the full set of signed attestations included with each Docker Hardened Image, such as SBOMs, VEX, build provenance, and scan results. + icon: assignment + link: /dhi/core-concepts/attestations/ + - title: Software Bill of Materials (SBOMs) + description: Learn what SBOMs are, why they matter, and how Docker Hardened Images include signed SBOMs to support transparency and compliance. + icon: list_alt + link: /dhi/core-concepts/sbom/ + - title: Supply-chain Levels for Software Artifacts (SLSA) + description: Learn how Docker Hardened Images comply with SLSA Build Level 3 and how to verify provenance for secure, tamper-resistant builds. + icon: fact_check + link: /dhi/core-concepts/slsa/ + - title: Image provenance + description: Learn how build provenance metadata helps trace the origin of Docker Hardened Images and support compliance with SLSA. + icon: track_changes + link: /dhi/core-concepts/provenance/ + + grid_concepts_compliance: + - title: FIPS + description: Learn how Docker Hardened Images support FIPS 140 by using validated cryptographic modules and providing signed attestations for compliance audits. + icon: verified + link: /dhi/core-concepts/fips/ + - title: STIG + description: Learn how Docker Hardened Images provide STIG-ready container images with verifiable security scan attestations for government and enterprise compliance requirements. + icon: policy + link: /dhi/core-concepts/stig/ + - title: CIS Benchmarks + description: Learn how Docker Hardened Images help you meet Center for Internet Security (CIS) Docker Benchmark requirements for secure container configuration and deployment. + icon: check_circle + link: /dhi/core-concepts/cis/ + + grid_concepts_risk: + - title: Common Vulnerabilities and Exposures (CVEs) + description: Understand what CVEs are, how Docker Hardened Images reduce exposure, and how to scan images for vulnerabilities using popular tools. + icon: error + link: /dhi/core-concepts/cves/ + - title: Vulnerability Exploitability eXchange (VEX) + description: Learn how VEX helps you prioritize real risks by identifying which vulnerabilities in Docker Hardened Images are actually exploitable. + icon: warning + link: /dhi/core-concepts/vex/ + - title: Software Supply Chain Security + description: Learn how Docker Hardened Images help secure every stage of your software supply chain with signed metadata, provenance, and minimal attack surface. + icon: shield + link: /dhi/core-concepts/sscs/ + - title: Secure Software Development Lifecycle (SSDLC) + description: See how Docker Hardened Images support a secure SDLC by integrating with scanning, signing, and debugging tools. 
+ icon: build_circle + link: /dhi/core-concepts/ssdlc/ + + grid_concepts_structure: + - title: Distroless images + description: Learn how Docker Hardened Images use distroless variants to minimize attack surface and remove unnecessary components. + icon: layers_clear + link: /dhi/core-concepts/distroless/ + - title: glibc and musl support in Docker Hardened Images + description: Compare glibc and musl variants of DHIs to choose the right base image for your application’s compatibility, size, and performance needs. + icon: swap_vert + link: /dhi/core-concepts/glibc-musl/ + - title: Image immutability + description: Understand how image digests, read-only containers, and signed metadata ensure Docker Hardened Images are tamper-resistant and immutable. + icon: do_not_disturb_on + link: /dhi/core-concepts/immutability/ + - title: Image hardening + description: Learn how Docker Hardened Images are designed for security, with minimal components, nonroot execution, and secure-by-default configurations. + icon: security + link: /dhi/core-concepts/hardening/ + + grid_concepts_verification: + - title: Digests + description: Learn how to use immutable image digests to guarantee consistency and verify the exact Docker Hardened Image you're running. + icon: fingerprint + link: /dhi/core-concepts/digests/ + - title: Code signing + description: Understand how Docker Hardened Images are cryptographically signed using Cosign to verify authenticity, integrity, and secure provenance. + icon: key + link: /dhi/core-concepts/signatures/ +--- + +Docker Hardened Images (DHIs) are built on a foundation of secure software +supply chain practices. This section explains the core concepts behind that +foundation, from signed attestations and immutable digests to standards like SLSA +and VEX. + +Start here if you want to understand how Docker Hardened Images support compliance, +transparency, and security. + + +## Security metadata and attestations + +{{< grid items="grid_concepts_metadata" >}} + +## Compliance standards + +{{< grid items="grid_concepts_compliance" >}} + +## Vulnerability and risk management + +{{< grid items="grid_concepts_risk" >}} + +## Image structure and behavior + +{{< grid items="grid_concepts_structure" >}} + +## Verification and traceability + +{{< grid items="grid_concepts_verification" >}} \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/attestations.md b/content/manuals/dhi/core-concepts/attestations.md new file mode 100644 index 00000000000..af72fd2e666 --- /dev/null +++ b/content/manuals/dhi/core-concepts/attestations.md @@ -0,0 +1,159 @@ +--- +title: Attestations +description: Review the full set of signed attestations included with each Docker Hardened Image, such as SBOMs, VEX, build provenance, and scan results. +keywords: container image attestations, signed sbom, build provenance, slsa compliance, vex document +--- + +Docker Hardened Images (DHIs) and charts include comprehensive, signed security +attestations that verify the image's build process, contents, and security +posture. These attestations are a core part of secure software supply chain +practices and help users validate that an image is trustworthy and +policy-compliant. + +## What is an attestation? + +An attestation is a signed statement that provides verifiable information +about an image or chart, such as how it was built, what's inside it, and what security +checks it has passed. 
Attestations are typically signed using Sigstore tooling +(such as Cosign), making them tamper-evident and cryptographically verifiable. + +Attestations follow standardized formats (like [in-toto](https://in-toto.io/), +[CycloneDX](https://cyclonedx.org/), and [SLSA](https://slsa.dev/)) and are +attached to the image or chart as OCI-compliant metadata. They can be generated +automatically during image builds or added manually to document extra tests, +scan results, or custom provenance. + +## Why are attestations important? + +Attestations provide critical visibility into the software supply chain by: + +- Documenting *what* went into an image (e.g., SBOMs) +- Verifying *how* it was built (e.g., build provenance) +- Capturing *what security scans* it has passed or failed (e.g., CVE reports, + secrets scans, test results) +- Helping organizations enforce compliance and security policies +- Supporting runtime trust decisions and CI/CD policy gates + +They are essential for meeting industry standards such as SLSA, +and help teams reduce the risk of supply chain attacks by making build and +security data transparent and verifiable. + +## How Docker Hardened Images and charts use attestations + +All DHIs and charts are built using [SLSA Build Level +3](https://slsa.dev/spec/latest/levels) practices, and each image variant is +published with a full set of signed attestations. These attestations allow users +to: + +- Verify that the image or chart was built from trusted sources in a secure environment +- View SBOMs in multiple formats to understand component-level details +- Review scan results to check for vulnerabilities or embedded secrets +- Confirm the build and deployment history of each image + +Attestations are automatically published and associated with each DHI +and chart. They can be inspected using tools like [Docker +Scout](../how-to/verify.md) or +[Cosign](https://docs.sigstore.dev/cosign/overview), and are consumable by CI/CD +tooling or security platforms. + +## Image attestations + +While every DHI variant includes a set of attestations, the attestations may +vary based on the image variant. For example, some images may include a STIG +scan attestation. The following table is a comprehensive list of all +attestations that may be included with a DHI. To see which attestations are +available for a specific image variant, including the specific predicate type URIs, +use Docker Scout: + +```console +$ docker scout attest list dhi.io/: +``` + +For more details, see [Verify image attestations](../how-to/verify.md#verify-image-attestations). + +| Attestation type | Description | +|----------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| CycloneDX SBOM | A software bill of materials in [CycloneDX](https://cyclonedx.org/) format, listing components, libraries, and versions. | +| STIG scan | Results of a STIG scan, with output in HTML and XCCDF formats. | +| CVEs (In-Toto format) | A list of known vulnerabilities (CVEs) affecting the image's components, based on package and distribution scanning. | +| VEX | A [Vulnerability Exploitability eXchange (VEX)](https://openvex.dev/) document that identifies vulnerabilities that do not apply to the image and explains why (e.g., not reachable or not present). 
| +| Scout health score | A signed attestation from Docker Scout that summarizes the overall security and quality posture of the image. | +| Scout provenance | Provenance metadata generated by Docker Scout, including the source Git commit, build parameters, and environment details. | +| Scout SBOM | An SBOM generated and signed by Docker Scout, including additional Docker-specific metadata. | +| Secrets scan | Results of a scan for accidentally included secrets, such as credentials, tokens, or private keys. | +| Tests | A record of automated tests run against the image, such as functional checks or validation scripts. | +| Virus scan | Results of antivirus scans performed on the image layers. For details, see [Malware scanning](../explore/malware-scanning.md). | +| CVEs (Scout format) | A vulnerability report generated by Docker Scout, listing known CVEs and severity data. | +| SLSA provenance | A standard [SLSA](https://slsa.dev/) provenance statement describing how the image was built, including build tool, parameters, and source. | +| SLSA verification summary | A summary attestation indicating the image's compliance with SLSA requirements. | +| SPDX SBOM | An SBOM in [SPDX](https://spdx.dev/) format, widely adopted in open-source ecosystems. | +| FIPS compliance | An attestation that verifies the image uses FIPS 140-validated cryptographic modules. | +| DHI Image Sources | Links to a corresponding source image containing all materials used to build the image, including package source code, Git repositories, and local files, ensuring compliance with open source license requirements. | + +## Package attestations + +In addition to image-level attestations, Docker hardened packages also include +their own attestations. These package-level attestations provide provenance and +build information for individual packages within an image, allowing you to +trace the supply chain at a granular level. + +Package attestations include similar information as image attestations, such as +SLSA provenance, showing how each package was built and what materials were +used. You can extract package information from an image's attestations and then +retrieve the package's own attestations recursively. + +For detailed instructions on how to access and verify package attestations, see +[Package attestations](../how-to/hardened-packages.md#package-attestations). + +## Helm chart attestations + +Docker Hardened Image (DHI) charts also include comprehensive signed attestations +that provide transparency and verification for your Kubernetes deployments. Like +DHI container images, these charts are built following SLSA Build Level 3 +practices and include extensive security metadata. + +DHI Helm charts include the following attestations. To view the specific predicate +type URIs for these attestations, use Docker Scout: + +```console +$ docker scout attest list dhi.io/: +``` + +For more details, see [Verify Helm chart attestations](../how-to/verify.md#verify-helm-chart-attestations-with-docker-scout). + +| Attestation type | Description | +|----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| CycloneDX SBOM | A software bill of materials in [CycloneDX](https://cyclonedx.org/) format, listing the chart itself and all container images and tools referenced by the chart. 
| +| CVEs (In-Toto format) | A list of known vulnerabilities (CVEs) affecting the container images and components referenced by the chart. | +| Scout health score | A signed attestation from Docker Scout that summarizes the overall security and quality posture of the chart and its referenced images. | +| Scout provenance | Provenance metadata generated by Docker Scout, including the chart source repository, build images used, and build parameters. | +| Scout SBOM | An SBOM generated and signed by Docker Scout, including the chart and container images it references, with additional Docker-specific metadata. | +| Secrets scan | Results of a scan for accidentally included secrets, such as credentials, tokens, or private keys, in the chart package. | +| Tests | A record of automated tests run against the chart to validate functionality and compatibility with referenced images. | +| Virus scan | Results of antivirus scans performed on the chart package. For details, see [Malware scanning](../explore/malware-scanning.md). | +| CVEs (Scout format) | A vulnerability report generated by Docker Scout, listing known CVEs and severity data for the chart's referenced images. | +| SLSA provenance | A standard [SLSA](https://slsa.dev/) provenance statement describing how the chart was built, including build tool, source repository, referenced images, and build materials. | +| SPDX SBOM | An SBOM in [SPDX](https://spdx.dev/) format, listing the chart and all container images and tools it references. | + +## View and verify attestations + +To view and verify attestations, see [Verify a Docker Hardened +Image](../how-to/verify.md). + +## Add your own attestations + +In addition to the comprehensive attestations provided by Docker Hardened +Images, you can add your own signed attestations when building derivative +images. This is especially useful if you’re building new applications on top of +a DHI and want to maintain transparency, traceability, and trust in your +software supply chain. + +By attaching attestations such as SBOMs, build provenance, or custom metadata, +you can meet compliance requirements, pass security audits, and support policy +evaluation tools like Docker Scout. + +These attestations can then be verified downstream using tools +like Cosign or Docker Scout. + +To learn how to attach custom attestations during the build process, see [Build +attestations](/manuals/build/metadata/attestations.md). diff --git a/content/manuals/dhi/core-concepts/cis.md b/content/manuals/dhi/core-concepts/cis.md new file mode 100644 index 00000000000..0298a140655 --- /dev/null +++ b/content/manuals/dhi/core-concepts/cis.md @@ -0,0 +1,55 @@ +--- +title: CIS Benchmark +description: Learn how Docker Hardened Images comply with the CIS Docker Benchmark to help organizations harden container images for secure deployments. +keywords: docker cis benchmark, cis docker compliance, cis docker images, docker hardened images, secure container images +--- + +## What is the CIS Docker Benchmark? + +The [CIS Docker Benchmark](https://www.cisecurity.org/benchmark/docker) is part +of the globally recognized CIS Benchmarks, developed by the [Center for +Internet Security (CIS)](https://www.cisecurity.org/). It defines recommended secure +configurations for all aspects of the Docker container ecosystem, including the +container host, Docker daemon, container images, and the container runtime. 
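
The benchmark's host, daemon, and runtime controls fall outside what any image can provide, but they can be checked with Docker's open source [Docker Bench for Security](https://github.com/docker/docker-bench-security) script, which runs the benchmark's automated tests against a running Docker host. A minimal sketch, assuming a Linux host with Docker Engine installed (see the project's README for the container-based invocation and current options):

```console
$ git clone https://github.com/docker/docker-bench-security.git
$ cd docker-bench-security
$ sudo sh docker-bench-security.sh
```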
+ +## Why CIS Benchmark compliance matters + +Following the CIS Docker Benchmark helps organizations: + +- Reduce security risk with widely recognized hardening guidance. +- Meet regulatory or contractual requirements that reference CIS controls. +- Standardize image and Dockerfile practices across teams. +- Demonstrate audit readiness with configuration decisions grounded in a public standard. + +## How Docker Hardened Images comply with the CIS Benchmark + +Docker Hardened Images (DHIs) are designed with security in mind and are +verified to be compliant with the relevant controls from the CIS Docker +Benchmark for the scope that applies to container images and Dockerfile +configuration. + +CIS-compliant DHIs are compliant with all controls in Section 4, with the sole +exception of the control requiring Docker Content Trust (DCT), which [Docker +officially retired](https://www.docker.com/blog/retiring-docker-content-trust/). +Instead, DHIs are [signed](/manuals/dhi/core-concepts/signatures.md) using +Cosign, providing an even higher level of authenticity and integrity. By +starting from a CIS-compliant DHI, teams can adopt image-level best practices +from the benchmark more quickly and confidently. + +> [!NOTE] +> +> The CIS Docker Benchmark also includes controls for the host, daemon, and +> runtime. CIS-compliant DHIs address only the image and Dockerfile scope (Section +> 4). Overall compliance still depends on how you configure and operate the +> broader environment. + +## Identify CIS-compliant images + +CIS-compliant images are labeled as **CIS** in the Docker Hardened Images catalog. +To find them, [search the catalog](../how-to/explore.md) and look for the **CIS** +designation on individual listings. + +## Get the benchmark + +Download the latest CIS Docker Benchmark directly from CIS: +https://www.cisecurity.org/benchmark/docker diff --git a/content/manuals/dhi/core-concepts/cves.md b/content/manuals/dhi/core-concepts/cves.md new file mode 100644 index 00000000000..da7a1140cb6 --- /dev/null +++ b/content/manuals/dhi/core-concepts/cves.md @@ -0,0 +1,173 @@ +--- +title: Common Vulnerabilities and Exposures (CVEs) +linktitle: CVEs +description: Understand what CVEs are, how Docker Hardened Images reduce exposure, and how to scan images for vulnerabilities using popular tools. +keywords: docker cve scan, grype vulnerability scanner, trivy image scan, vex attestation, secure container images +--- + +## What are CVEs? + +CVEs are publicly disclosed cybersecurity flaws in software or hardware. Each +CVE is assigned a unique identifier (e.g., CVE-2024-12345) and includes a +standardized description, allowing organizations to track and address +vulnerabilities consistently. + +In the context of Docker, CVEs often pertain to issues within base images, or +application dependencies. These vulnerabilities can range from minor bugs to +critical security risks, such as remote code execution or privilege escalation. + +## Why are CVEs important? + +Regularly scanning and updating Docker images to mitigate CVEs is crucial for +maintaining a secure and compliant environment. Ignoring CVEs can lead to severe +security breaches, including: + +- Unauthorized access: Exploits can grant attackers unauthorized access to + systems. +- Data breaches: Sensitive information can be exposed or stolen. +- Service disruptions: Vulnerabilities can be leveraged to disrupt services or + cause downtime. 
+- Compliance violations: Failure to address known vulnerabilities can lead to + non-compliance with industry regulations and standards. + +## How Docker Hardened Images help mitigate CVEs + +Docker Hardened Images (DHIs) are crafted to minimize the risk of CVEs from the +outset. By adopting a security-first approach, DHIs offer several advantages in +CVE mitigation: + +- Reduced attack surface: DHIs are built using a distroless approach, stripping + away unnecessary components and packages. This reduction in image size, up to + 95% smaller than traditional images, limits the number of potential + vulnerabilities, making it harder for attackers to exploit unneeded software. + +- Faster CVE remediation: Maintained by Docker with an [enterprise-grade SLA](https://docs.docker.com/go/dhi-sla/), + DHIs are continuously updated to address known vulnerabilities. Critical and + high-severity CVEs are patched quickly, ensuring that your containers remain + secure without manual intervention. + +- Proactive vulnerability management: By utilizing DHIs, organizations can + proactively manage vulnerabilities. The images come with CVE and Vulnerability + Exposure (VEX) feeds, enabling teams to stay informed about potential threats + and take necessary actions promptly. + +## Scan images for CVEs + +Regularly scanning Docker images for CVEs is essential for maintaining a secure +containerized environment. While Docker Scout is integrated into Docker Desktop +and the Docker CLI, tools like Grype and Trivy offer alternative scanning +capabilities. The following are instructions for using each tool to scan Docker +images for CVEs. + +### Docker Scout + +Docker Scout is integrated into Docker Desktop and the Docker CLI. It provides +vulnerability insights, CVE summaries, and direct links to remediation guidance. + +#### Scan a DHI using Docker Scout + +To scan a Docker Hardened Image using Docker Scout, run the following +command: + +```console +$ docker scout cves dhi.io/: --platform +``` + +Example output: + +```plaintext + v SBOM obtained from attestation, 101 packages found + v Provenance obtained from attestation + v VEX statements obtained from attestation + v No vulnerable package detected + ... +``` + +For more detailed filtering and JSON output, see [Docker Scout CLI reference](/reference/cli/docker/scout/). + +### Grype + +[Grype](https://github.com/anchore/grype) is an open-source scanner that checks +container images against vulnerability databases like the NVD and distro +advisories. + +#### Scan a DHI using Grype + +After installing Grype, you can scan a Docker Hardened Image by pulling +the image and running the scan command. Grype requires you to export the VEX +attestation to a file first: + +```console +$ docker pull dhi.io/: +$ docker scout vex get dhi.io/: --output vex.json +$ grype dhi.io/: --vex vex.json +``` + +Example output: + +```plaintext +NAME INSTALLED FIXED-IN TYPE VULNERABILITY SEVERITY EPSS% RISK +libperl5.36 5.36.0-7+deb12u2 (won't fix) deb CVE-2023-31484 High 79.45 1.1 +perl 5.36.0-7+deb12u2 (won't fix) deb CVE-2023-31484 High 79.45 1.1 +perl-base 5.36.0-7+deb12u2 (won't fix) deb CVE-2023-31484 High 79.45 1.1 +... +``` + +### Trivy + +[Trivy](https://github.com/aquasecurity/trivy) is an open-source vulnerability +scanner for containers and other artifacts. It detects vulnerabilities in OS +packages and application dependencies. 
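
The following steps assume the `trivy` CLI is already installed and on your `PATH`. One way to install it, shown here only as a sketch (check Trivy's installation docs for the method recommended for your platform):

```console
# Homebrew (macOS or Linux)
$ brew install trivy

# Or the upstream install script
$ curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin
```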
+ +#### Scan a DHI using Trivy + +After installing Trivy, you can scan a Docker Hardened Image by pulling +the image and running the scan command: + +```console +$ docker pull dhi.io/: +$ trivy image --scanners vuln --vex repo dhi.io/: +``` + +Example output: + +```plaintext +Report Summary + +┌──────────────────────────────────────────────────────────────────────────────┬────────────┬─────────────────┬─────────┐ +│ Target │ Type │ Vulnerabilities │ Secrets │ +├──────────────────────────────────────────────────────────────────────────────┼────────────┼─────────────────┼─────────┤ +│ dhi.io/: (debian 12.11) │ debian │ 66 │ - │ +├──────────────────────────────────────────────────────────────────────────────┼────────────┼─────────────────┼─────────┤ +│ opt/python-3.13.4/lib/python3.13/site-packages/pip-25.1.1.dist-info/METADATA │ python-pkg │ 0 │ - │ +└──────────────────────────────────────────────────────────────────────────────┴────────────┴─────────────────┴─────────┘ +``` + +## Use VEX to filter known non-exploitable CVEs + +Docker Hardened Images include signed [VEX (Vulnerability Exploitability +eXchange)](./vex.md) attestations that identify vulnerabilities not relevant to the image’s +runtime behavior. + +When using Docker Scout or Trivy, these VEX statements are automatically +applied using the previous examples, and no manual configuration needed. + +To manually retrieve the VEX attestation for tools that support it: + +```console +$ docker scout vex get dhi.io/: --output vex.json +``` + +> [!NOTE] +> +> If the image exists locally on your device, you must prefix the image name with `registry://`. For example, use +> `registry://dhi.io/python:3.13` instead of `dhi.io/python:3.13`. + +For example: + +```console +$ docker scout vex get dhi.io/python:3.13 --output vex.json +``` + +This creates a `vex.json` file containing the VEX statements for the specified +image. You can then use this file with tools that support VEX to filter out known non-exploitable CVEs. \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/digests.md b/content/manuals/dhi/core-concepts/digests.md new file mode 100644 index 00000000000..27bdc244b45 --- /dev/null +++ b/content/manuals/dhi/core-concepts/digests.md @@ -0,0 +1,126 @@ +--- +title: Image digests +description: Learn how Docker Hardened Images help secure every stage of your software supply chain with signed metadata, provenance, and minimal attack surface. +keywords: docker image digest, pull image by digest, immutable container image, secure container reference, multi-platform manifest +--- + +## What are Docker image digests? + +A Docker image digest is a unique, cryptographic identifier (SHA-256 hash) +representing the content of a Docker image. Unlike tags, which can be reused or +changed, a digest is immutable and ensures that the exact same image is pulled +every time. This guarantees consistency across different environments and +deployments. + +For example, the digest for the `nginx:latest` image might look like: + +```text +sha256:94a00394bc5a8ef503fb59db0a7d0ae9e1110866e8aee8ba40cd864cea69ea1a +``` + +This digest uniquely identifies the specific version of the `nginx:latest` image, +ensuring that any changes to the image content result in a different digest. + +## Why are image digests important? + +Using image digests instead of tags offers several advantages: + +- Immutability: Once an image is built and its digest is generated, the content + tied to that digest cannot change. 
This means that if you pull an image using + its digest, you can be confident that you are retrieving exactly the same + image that was originally built. + +- Security: Digests help prevent supply chain attacks by ensuring that the image + content has not been tampered with. Even a small change in the image content + will result in a completely different digest. + +- Consistency: Using digests ensures that the same image is used across + different environments, reducing the risk of discrepancies between + development, staging, and production environments. + +## Docker Hardened Image digests + +By using image digests to reference DHIs, you can ensure that your applications are +always using the exact same secure image version, enhancing security and +compliance + +## View an image digest + +### Use the Docker CLI + +To view the image digest of a Docker image, you can use the following command. Replace +`:` with the image name and tag. + +```console +$ docker buildx imagetools inspect : +``` + +### Use the Docker Hub UI + +1. Go to [Docker Hub](https://hub.docker.com/) and sign in. +2. Navigate to your organization's namespace and open the mirrored DHI repository. +3. Select the **Tags** tab to view image variants. +4. Each tag in the list includes a **Digest** field showing the image's SHA-256 value. + +## Pull an image by digest + +Pulling an image by digest ensures that you are pulling the exact image version +identified by the specified digest. + +To pull a Docker image using its digest, use the following command. Replace +`` with the image name and `` with the image digest. + +```console +$ docker pull @sha256: +``` + +For example, to pull a `docs/dhi-python:3.13` image using its digest of +`94a00394bc5a8ef503fb59db0a7d0ae9e1110866e8aee8ba40cd864cea69ea1a`, you would +run: + +```console +$ docker pull docs/dhi-python@sha256:94a00394bc5a8ef503fb59db0a7d0ae9e1110866e8aee8ba40cd864cea69ea1a +``` + +## Multi-platform images and manifests + +Docker Hardened Images are published as multi-platform images, which means +a single image tag (like `docs/dhi-python:3.13`) can support multiple operating +systems and CPU architectures, such as `linux/amd64`, `linux/arm64`, and more. + +Instead of pointing to a single image, a multi-platform tag points to a manifest +list (also called an index), which is a higher-level object that references +multiple image digests, one for each supported platform. + +When you inspect a multi-platform image using `docker buildx imagetools inspect`, you'll see something like this: + +```text +Name: docs/dhi-python:3.13 +MediaType: application/vnd.docker.distribution.manifest.list.v2+json +Digest: sha256:6e05...d231 + +Manifests: + Name: docs/dhi-python:3.13@sha256:94a0...ea1a + Platform: linux/amd64 + ... + + Name: docs/dhi-python:3.13@sha256:7f1d...bc43 + Platform: linux/arm64 + ... +``` + +- The manifest list digest (`sha256:6e05...d231`) identifies the overall + multi-platform image. +- Each platform-specific image has its own digest (e.g., `sha256:94a0...ea1a` + for `linux/amd64`). + +### Why this matters + +- Reproducibility: If you're building or running containers on different + architectures, using a tag alone will resolve to the appropriate image digest + for your platform. +- Verification: You can pull and verify a specific image digest for your + platform to ensure you're using the exact image version, not just the manifest + list. 
+- Policy enforcement: When enforcing digest-based policies with Docker Scout, + each platform variant is evaluated individually using its digest. \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/distroless.md b/content/manuals/dhi/core-concepts/distroless.md new file mode 100644 index 00000000000..c5b8a5d0b69 --- /dev/null +++ b/content/manuals/dhi/core-concepts/distroless.md @@ -0,0 +1,73 @@ +--- +title: Minimal or distroless images +linktitle: Distroless images +description: Learn how Docker Hardened Images use distroless variants to minimize attack surface and remove unnecessary components. +keywords: distroless container image, minimal docker image, secure base image, no shell container, reduced attack surface +--- + + +Minimal images, sometimes called distroless images, are container images +stripped of unnecessary components such as package managers, shells, or even the +underlying operating system distribution. Docker Hardened Images (DHI) embrace +this minimal approach to reduce vulnerabilities and enforce secure software +delivery. [Docker Official +Images](../../docker-hub/image-library/trusted-content.md#docker-official-images) +and [Docker Verified Publisher +Images](../../docker-hub/image-library/trusted-content.md#verified-publisher-images) +follow similar best practices for minimalism and security but may not be as +stripped down to ensure compatibility with a wider range of use cases. + +## What are minimal or distroless images? + +Traditional container images include a full OS, often more than what is needed +to run an application. In contrast, minimal or distroless images include only: + +- The application binary +- Its runtime dependencies (e.g., libc, Java, Python) +- Any explicitly required configuration or metadata + +They typically exclude: + +- OS tools (e.g., `ls`, `ps`, `cat`) +- Shells (e.g., `sh`, `bash`) +- Package managers (e.g., `apt`, `apk`) +- Debugging utilities (e.g., `curl`, `wget`, `strace`) + +Docker Hardened Images are based on this model, ensuring a smaller and more +secure runtime surface. + +## What you gain + +| Benefit | Description | +|------------------------|-------------------------------------------------------------------------------| +| Smaller attack surface | Fewer components mean fewer vulnerabilities and less exposure to CVEs | +| Faster startup | Smaller image sizes result in faster pull and start times | +| Improved security | Lack of shell and package manager limits what attackers can do if compromised | +| Better compliance | Easier to audit and verify, especially with SBOMs and attestations | + +## Addressing common tradeoffs + +Minimal and distroless images offer strong security benefits, but they can +change how you work with containers. Docker Hardened Images are designed to +maintain productivity while enhancing security. + +| Concern | How Docker Hardened Images help | +|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Debuggability | Hardened images exclude shells and CLI tools by default. Use [Docker Debug](/reference/cli/docker/debug/) to temporarily attach a debug sidecar for troubleshooting without modifying the original container. 
| +| Familiarity | DHI supports multiple base images, including Alpine and Debian variants, so you can choose a familiar environment while still benefiting from hardening practices. | +| Flexibility | Runtime immutability helps secure your containers. Use multi-stage builds and CI/CD to control changes, and optionally use dev-focused base images during development. | + +By balancing minimalism with practical tooling, Docker Hardened Images support +modern development workflows without compromising on security or reliability. + +## Best practices for using minimal images + +- Use multi-stage builds to separate build-time and runtime environments +- Validate image behavior using CI pipelines, not interactive inspection +- Include runtime-specific dependencies explicitly in your Dockerfile +- Use Docker Scout to continuously monitor for CVEs, even in minimal images + +By adopting minimal or distroless images through Docker Hardened Images, you +gain a more secure, predictable, and production-ready container environment +that's designed for automation, clarity, and reduced risk. + diff --git a/content/manuals/dhi/core-concepts/fips.md b/content/manuals/dhi/core-concepts/fips.md new file mode 100644 index 00000000000..3e425e64551 --- /dev/null +++ b/content/manuals/dhi/core-concepts/fips.md @@ -0,0 +1,122 @@ +--- +title: FIPS +description: Learn how Docker Hardened Images support FIPS 140 through validated cryptographic modules to help organizations meet compliance requirements. +keywords: docker fips, fips 140 images, fips docker images, docker compliance, secure container images +--- + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +## What is FIPS 140? + +[FIPS 140](https://csrc.nist.gov/publications/detail/fips/140/3/final) is a U.S. +government standard that defines security requirements for cryptographic modules +that protect sensitive information. It is widely used in regulated environments +such as government, healthcare, and financial services. + +FIPS certification is managed by the [NIST Cryptographic Module Validation +Program +(CMVP)](https://csrc.nist.gov/projects/cryptographic-module-validation-program), +which ensures cryptographic modules meet rigorous security standards. + +## Why FIPS compliance matters + +FIPS 140 compliance is required or strongly recommended in many regulated +environments where sensitive data must be protected, such as government, +healthcare, finance, and defense. These standards ensure that cryptographic +operations are performed using vetted, trusted algorithms implemented in secure +modules. + +Using software components that rely on validated cryptographic modules can help organizations: + +- Satisfy federal and industry mandates, such as FedRAMP, which require or + strongly recommend FIPS 140-validated cryptography. +- Demonstrate audit readiness, with verifiable evidence of secure, + standards-based cryptographic practices. +- Reduce security risk, by blocking unapproved or unsafe algorithms (e.g., MD5) + and ensuring consistent behavior across environments. + +## How Docker Hardened Images support FIPS compliance + +While Docker Hardened Images are available to all, the FIPS variant requires a +paid Docker Hardened Images subscription. + +Docker Hardened Images (DHIs) include variants that use cryptographic modules +validated under FIPS 140. These images are intended to help organizations meet +compliance requirements by incorporating components that meet the standard. 
+ +- FIPS image variants use cryptographic modules that are already validated under + FIPS 140. +- These variants are built and maintained by Docker to support environments with + regulatory or compliance needs. +- Docker provides signed test attestations that document the use of validated + cryptographic modules. These attestations can support internal audits and + compliance reporting. +- Entropy sources (random number generation for cryptographic operations) vary + by base image. Debian-based images use the OpenSSL entropy source, while + Alpine-based images source entropy from the host kernel. + +> [!NOTE] +> +> Using a FIPS image variant helps meet compliance requirements but does not +> make an application or system fully compliant. Compliance depends on how the +> image is integrated and used within the broader system. + +## Identify images that support FIPS + +Docker Hardened Images that support FIPS are marked as **FIPS** compliant +in the Docker Hardened Images catalog. + +To find DHI repositories with FIPS image variants, [search the catalog](../how-to/explore.md) and: + +- Use the **FIPS** filter on the catalog page +- Look for the **FIPS** compliant label on individual image listings + +These indicators help you quickly locate repositories that support FIPS-based +compliance needs. Image variants that include FIPS support will have a tag +ending with `-fips`, such as `3.13-fips`. + +## Use a FIPS variant + +To use a FIPS variant, you must [mirror](../how-to/mirror.md) the repository +and then pull the FIPS image from your mirrored repository. + +## View the FIPS attestation + +The FIPS variants of Docker Hardened Images contain a FIPS attestation that +lists the actual cryptographic modules included in the image. + +You can retrieve and inspect the FIPS attestation using the Docker Scout CLI: + +```console +$ docker scout attest get \ + --predicate-type https://docker.com/dhi/fips/v0.1 \ + --predicate \ + dhi.io/<repository>:<tag> +``` + +For example: + +```console +$ docker scout attest get \ + --predicate-type https://docker.com/dhi/fips/v0.1 \ + --predicate \ + dhi.io/python:3.13-fips +``` + +The attestation output is a JSON array describing the cryptographic modules +included in the image and their compliance status. For example: + +```json +[ + { + "certification": "CMVP #4985", + "certificationUrl": "https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4985", + "name": "OpenSSL FIPS Provider", + "package": "pkg:dhi/openssl-provider-fips@3.1.2", + "standard": "FIPS 140-3", + "status": "active", + "sunsetDate": "2030-03-10", + "version": "3.1.2" + } +] +``` \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/glibc-musl.md new file mode 100644 index 00000000000..1ef7cdfa45f --- /dev/null +++ b/content/manuals/dhi/core-concepts/glibc-musl.md @@ -0,0 +1,58 @@ +--- +title: glibc and musl support in Docker Hardened Images +linktitle: glibc and musl +description: Compare glibc and musl variants of DHIs to choose the right base image for your application’s compatibility, size, and performance needs. +keywords: glibc vs musl, alpine musl image, debian glibc container, docker hardened images compatibility, c library in containers +--- + +Docker Hardened Images (DHI) are built to prioritize security without +sacrificing compatibility with the broader open source and enterprise software +ecosystem. A key aspect of this compatibility is support for common Linux +standard libraries: `glibc` and `musl`.
+ +## What are glibc and musl? + +When you run Linux-based containers, the image's C library plays a key role in +how applications interact with the operating system. Most modern Linux +distributions rely on one of the following standard C libraries: + +- `glibc` (GNU C Library): The standard C library on mainstream distributions + like Debian, Ubuntu, and Red Hat Enterprise Linux. It is widely supported and + typically considered the most compatible option across languages, frameworks, + and enterprise software. + +- `musl`: A lightweight alternative to `glibc`, commonly used in minimal + distributions like Alpine Linux. While it offers smaller image sizes and + performance benefits, `musl` is not always fully compatible with software that + expects `glibc`. + +## DHI compatibility + +DHI images are available in both `glibc`-based (e.g., Debian) and `musl`-based +(e.g., Alpine) variants. For enterprise applications and language runtimes where +compatibility is critical, we recommend using DHI images based on glibc. + +## What to choose, glibc or musl? + +Docker Hardened Images are available in both glibc-based (Debian) and musl-based +(Alpine) variants, allowing you to choose the best fit for your workload. + +Choose Debian-based (`glibc`) images if: + +- You need broad compatibility with enterprise workloads, language runtimes, or + proprietary software. +- You're using ecosystems like .NET, Java, or Python with native extensions that + depend on `glibc`. +- You want to minimize the risk of runtime errors due to library + incompatibilities. + +Choose Alpine-based (`musl`) images if: + +- You want a minimal footprint with smaller image sizes and reduced surface + area. +- You're building a custom or tightly controlled application stack where + dependencies are known and tested. +- You prioritize startup speed and lean deployments over maximum compatibility. + +If you're unsure, start with a Debian-based image to ensure compatibility, and +evaluate Alpine once you're confident in your application's dependencies. \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/hardening.md b/content/manuals/dhi/core-concepts/hardening.md new file mode 100644 index 00000000000..105b527eff6 --- /dev/null +++ b/content/manuals/dhi/core-concepts/hardening.md @@ -0,0 +1,80 @@ +--- +title: Base image hardening +linktitle: Hardening +description: Learn how Docker Hardened Images are designed for security, with minimal components, nonroot execution, and secure-by-default configurations. +keywords: hardened base image, minimal container image, non-root containers, secure container configuration, remove package manager +--- + +## What is base image hardening? + +Base image hardening is the process of securing the foundational layers of a +container image by minimizing what they include and configuring them with +security-first defaults. A hardened base image removes unnecessary components, +like shells, compilers, and package managers, which limits the available attack +surface, making it more difficult for an attacker to gain control or escalate +privileges inside the container. + +Hardening also involves applying best practices like running as a non-root user, +reducing writable surfaces, and ensuring consistency through immutability. 
While +[Docker Official +Images](../../docker-hub/image-library/trusted-content.md#docker-official-images) +and [Docker Verified Publisher +Images](../../docker-hub/image-library/trusted-content.md#verified-publisher-images) +follow best practices for security, they may not be as hardened as Docker +Hardened Images, as they are designed to support a broader range of use cases. + +## Why is it important? + +Most containers inherit their security posture from the base image they use. If +the base image includes unnecessary tools or runs with elevated privileges, +every container built on top of it is exposed to those risks. + +Hardening the base image: + +- Reduces the attack surface by removing tools and libraries that could be exploited +- Enforces least privilege by dropping root access and restricting what the container can do +- Improves reliability and consistency by avoiding runtime changes and drift +- Aligns with secure software supply chain practices and helps meet compliance standards + +Using hardened base images is a critical first step in securing the software you +build and run in containers. + +## What's removed and why + +Hardened images typically exclude common components that are risky or unnecessary in secure production environments: + +| Removed component | Reason | +|--------------------------------------------------|----------------------------------------------------------------------------------| +| Shells (e.g., `sh`, `bash`) | Prevents users or attackers from executing arbitrary commands inside containers | +| Package managers (e.g., `apt`, `apk`) | Disables the ability to install software post-build, reducing drift and exposure | +| Compilers and interpreters | Avoids introducing tools that could be used to run or inject malicious code | +| Debugging tools (e.g., `strace`, `curl`, `wget`) | Reduces risk of exploitation or information leakage | +| Unused libraries or locales | Shrinks image size and minimizes attack vectors | + +## How Docker Hardened Images apply base image hardening + +Docker Hardened Images (DHIs) apply base image hardening principles by design. +Each image is constructed to include only what is necessary for its specific +purpose, whether that’s building applications (with `-dev` or `-sdk` tags) or +running them in production. + +### Docker Hardened Image traits + +Docker Hardened Images are built to be: + +- Minimal: Only essential libraries and binaries are included +- Immutable: Images are fixed at build time—no runtime installations +- Non-root by default: Containers run as an unprivileged user unless configured otherwise +- Purpose-scoped: Different tags are available for development (`-dev`), SDK-based builds (`-sdk`), and production runtime + +These characteristics help enforce consistent, secure behavior across development, testing, and production environments. + +### Docker Hardened Image compatibility considerations + +Because Docker Hardened Images strip out many common tools, they may not work out of the box for all use cases. 
You may need to: + +- Use multi-stage builds to compile code or install dependencies in a `-dev` image and copy the output into a hardened runtime image +- Replace shell scripts with equivalent entrypoint binaries or explicitly include a shell if needed +- Use [Docker Debug](/reference/cli/docker/debug/) to temporarily inspect or troubleshoot containers without altering the base image + +These trade-offs are intentional and help support best practices for building secure, reproducible, and production-ready containers. \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/immutability.md b/content/manuals/dhi/core-concepts/immutability.md new file mode 100644 index 00000000000..c6cfb114468 --- /dev/null +++ b/content/manuals/dhi/core-concepts/immutability.md @@ -0,0 +1,57 @@ +--- +title: Immutable infrastructure +linktitle: Immutability +description: Understand how image digests, read-only containers, and signed metadata ensure Docker Hardened Images are tamper-resistant and immutable. +keywords: immutable container image, read-only docker image, configuration drift prevention, secure redeployment, image digest verification +--- + +Immutable infrastructure is a security and operations model where components +such as servers, containers, and images are never modified after deployment. +Instead of patching or reconfiguring live systems, you replace them entirely +with new versions. + +When using Docker Hardened Images, immutability is a best practice that +reinforces the security posture of your software supply chain. + +## Why immutability matters + +Mutable systems are harder to secure and audit. Live patching or manual updates +introduce risks such as: + +- Configuration drift +- Untracked changes +- Inconsistent environments +- Increased attack surface + +Immutable infrastructure solves this by making changes only through controlled, +repeatable builds and deployments. + +## How Docker Hardened Images support immutability + +Docker Hardened Images are built to be minimal, locked-down, and +non-interactive, which discourages in-place modification. For example: + +- Many DHI images exclude shells, package managers, and debugging tools +- DHI images are designed to be scanned and signed before deployment +- DHI users are encouraged to rebuild and redeploy images rather than patch running containers + +This design aligns with immutable practices and ensures that: + +- Updates go through the CI/CD pipeline +- All changes are versioned and auditable +- Systems can be rolled back or reproduced consistently + +## Immutable patterns in practice + +Some common patterns that leverage immutability include: + +- Container replacement: Instead of logging into a container to fix a bug or + apply a patch, rebuild the image and redeploy it. +- Infrastructure as Code (IaC): Define your infrastructure and image + configurations in version-controlled files. +- Blue/Green or Canary deployments: Roll out new images alongside old ones and + gradually shift traffic to the new version. + +By combining immutable infrastructure principles with hardened images, you +create a predictable and secure deployment workflow that resists tampering and +minimizes long-term risk. 
\ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/provenance.md b/content/manuals/dhi/core-concepts/provenance.md new file mode 100644 index 00000000000..5c8adcb425e --- /dev/null +++ b/content/manuals/dhi/core-concepts/provenance.md @@ -0,0 +1,71 @@ +--- +title: Image provenance +description: Learn how build provenance metadata helps trace the origin of Docker Hardened Images and support compliance with SLSA. +keywords: image provenance, container build traceability, slsa compliance, signed container image, software supply chain trust +--- + +## What is image provenance? + +Image provenance refers to metadata that traces the origin, authorship, and +integrity of a container image. It answers critical questions such as: + +- Where did this image come from? +- Who built it? +- Has it been tampered with? + +Provenance establishes a chain of custody, helping you verify that the image +you're using is the result of a trusted and verifiable build process. + +## Why image provenance matters + +Provenance is foundational to securing your software supply chain. Without it, you risk: + +- Running unverified or malicious images +- Failing to meet internal or regulatory compliance requirements +- Losing visibility into the components and workflows that produce your containers + +With reliable provenance, you gain: + +- Trust: Know that your images are authentic and unchanged. +- Traceability: Understand the full build process and source inputs. +- Auditability: Provide verifiable evidence of compliance and build integrity. + +Provenance also supports automated policy enforcement and is a key requirement +for frameworks like SLSA (Supply-chain Levels for Software Artifacts). + +## How Docker Hardened Images support provenance + +Docker Hardened Images (DHIs) are designed with built-in provenance to help you +adopt secure-by-default practices and meet supply chain security standards. + +### Attestations + +DHIs include [attestations](./attestations.md)—machine-readable metadata that +describe how, when, and where the image was built. These are generated using +industry standards such as [in-toto](https://in-toto.io/) and align with [SLSA +provenance](https://slsa.dev/spec/v1.0/provenance/). + +Attestations allow you to: + +- Validate that builds followed the expected steps +- Confirm that inputs and environments meet policy +- Trace the build process across systems and stages + +### Code signing + +Each Docker Hardened Image is cryptographically [signed](./signatures.md) and +stored in the registry alongside its digest. These signatures are verifiable +proofs of authenticity and are compatible with tools like `cosign`, Docker +Scout, and Kubernetes admission controllers. 
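+
+As an illustration only, a signature check with Cosign might look like the
+following sketch. The identity and issuer values are placeholders, not Docker's
+published signing identity; when Docker Scout verifies an attestation it can
+display the equivalent Cosign command with the actual values filled in:
+
+```console
+$ cosign verify \
+    --certificate-identity-regexp "<signer-identity>" \
+    --certificate-oidc-issuer "<oidc-issuer>" \
+    dhi.io/python:3.13
+```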
+ +With image signatures, you can: + +- Confirm that the image was published by Docker +- Detect if an image has been modified or republished +- Enforce signature validation in CI/CD or production deployments + +## Additional resources + +- [Provenance attestations](/build/metadata/attestations/slsa-provenance/) +- [Image signatures](./signatures.md) +- [Attestations overview](./attestations.md) \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/sbom.md b/content/manuals/dhi/core-concepts/sbom.md new file mode 100644 index 00000000000..06dc2b67a31 --- /dev/null +++ b/content/manuals/dhi/core-concepts/sbom.md @@ -0,0 +1,94 @@ +--- +title: Software Bill of Materials (SBOMs) +linktitle: SBOMs +description: Learn what SBOMs are, why they matter, and how Docker Hardened Images include signed SBOMs to support transparency and compliance. +keywords: sbom docker image, software bill of materials, signed sbom, container sbom verification, sbom compliance +--- + +## What is an SBOM? + +An SBOM is a detailed inventory that lists all components, libraries, and +dependencies used in building a software application. It provides transparency +into the software supply chain by documenting each component's version, origin, +and relationship to other components. Think of it as a "recipe" for your +software, detailing every ingredient and how they come together. + +Metadata included in an SBOM for describing software artifacts may include: + +- Name of the artifact +- Version +- License type +- Authors +- Unique package identifier + +## Why are SBOMs important? + +In today's software landscape, applications often comprise numerous components +from various sources, including open-source libraries, third-party services, and +proprietary code. This complexity can obscure visibility into potential +vulnerabilities and complicate compliance efforts. SBOMs address these +challenges by providing a detailed inventory of all components within an +application. + + +The significance of SBOMs is underscored by several key factors: + +- Enhanced transparency: SBOMs offer a comprehensive view of all components that + constitute an application, enabling organizations to identify and assess risks + associated with third-party libraries and dependencies. + +- Proactive vulnerability management: By maintaining an up-to-date SBOM, + organizations can swiftly identify and address vulnerabilities in software + components, reducing the window of exposure to potential exploits. + +- Regulatory compliance: Many regulations and industry standards now require + organizations to maintain control over the software components they use. An + SBOM facilitates compliance by providing a clear and accessible record. + +- Improved incident response: In the event of a security breach, an SBOM + enables organizations to quickly identify affected components and take + appropriate action, minimizing potential damage. + +## Docker Hardened Image SBOMs + +Docker Hardened Images come with built-in SBOMs, ensuring that every component +in the image is documented and verifiable. These SBOMs are cryptographically +signed, providing a tamper-evident record of the image's contents. This +integration simplifies audits and enhances trust in the software supply chain. + +## View SBOMs in Docker Hardened Images + +To view the SBOM of a Docker Hardened Image, you can use the `docker scout sbom` +command. Replace `:` with the image name and tag. 
+ +```console +$ docker scout sbom dhi.io/<repository>:<tag> +``` + +## Verify the SBOM of a Docker Hardened Image + +Since Docker Hardened Images come with signed SBOMs, you can use Docker Scout to +verify the authenticity and integrity of the SBOM attached to the image. This +ensures that the SBOM has not been tampered with and that the image's contents +are trustworthy. + +To verify the SBOM of a Docker Hardened Image using Docker Scout, use the following command: + +```console +$ docker scout attest get dhi.io/<repository>:<tag> \ + --predicate-type https://scout.docker.com/sbom/v0.1 --verify --platform <platform> +``` + +For example, to verify the SBOM attestation for the `node:20.19-debian12` image: + +```console +$ docker scout attest get dhi.io/node:20.19-debian12 \ + --predicate-type https://scout.docker.com/sbom/v0.1 --verify --platform linux/amd64 +``` + +## Resources + +For more details about SBOM attestations and Docker Build, see [SBOM +attestations](/build/metadata/attestations/sbom/). + +To learn more about Docker Scout and working with SBOMs, see [Docker Scout SBOMs](../../scout/how-tos/view-create-sboms.md). \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/signatures.md new file mode 100644 index 00000000000..6922f3507cf --- /dev/null +++ b/content/manuals/dhi/core-concepts/signatures.md @@ -0,0 +1,104 @@ +--- +title: Code signing +description: Understand how Docker Hardened Images are cryptographically signed using Cosign to verify authenticity, integrity, and secure provenance. +keywords: container image signing, cosign docker image, verify image signature, signed container image, sigstore cosign +--- + +## What is code signing? + +Code signing is the process of applying a cryptographic signature to software +artifacts, such as Docker images, to verify their integrity and authenticity. By +signing an image, you ensure that it has not been altered since it was signed +and that it originates from a trusted source. + +In the context of Docker Hardened Images (DHIs), code signing is achieved using +[Cosign](https://docs.sigstore.dev/), a tool developed by the Sigstore project. +Cosign enables secure and verifiable signing of container images, enhancing +trust and security in the software supply chain. + +## Why is code signing important? + +Code signing plays a crucial role in modern software development and +cybersecurity: + +- Authenticity: Verifies that the image was created by a trusted source. +- Integrity: Ensures that the image has not been tampered with since it was + signed. +- Compliance: Helps meet regulatory and organizational security requirements. + +## Docker Hardened Image code signing + +Each DHI is cryptographically signed using Cosign, ensuring that the images have +not been tampered with and originate from a trusted source. + +## Why sign your own images? + +Docker Hardened Images are signed by Docker to prove their origin and integrity, +but if you're building application images that extend or use DHIs as a base, you +should sign your own images as well. + +By signing your own images, you can: + +- Prove the image was built by your team or pipeline +- Ensure your build hasn't been tampered with after it's pushed +- Support software supply chain frameworks like SLSA +- Enable image verification in deployment workflows + +This is especially important in CI/CD environments where you build and push +images frequently, or in any scenario where image provenance must be auditable.
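+
+For example, a minimal sketch of that flow for an application image built on a
+DHI base: build and push the image, then sign what you pushed with Cosign. The
+registry and image name are placeholders, and the signing command itself is
+covered in more detail below:
+
+```console
+$ docker build -t registry.example.com/myapp:1.0 .
+$ docker push registry.example.com/myapp:1.0
+$ cosign sign registry.example.com/myapp:1.0
+```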
+ +## How to view and use code signatures + +### View signatures + +You can verify that a Docker Hardened Image is signed and trusted using either Docker Scout or Cosign. + +To list all attestations, including signature metadata, attached to the image, use the following command: + +```console +$ docker scout attest list <image>:<tag> +``` + +> [!NOTE] +> +> If the image exists locally on your device, you must prefix the image name with `registry://`. For example, use +> `registry://dhi.io/python` instead of `dhi.io/python`. + +To verify a specific signed attestation (e.g., SBOM, VEX, provenance): + +```console +$ docker scout attest get \ + --predicate-type <predicate-type> \ + --verify \ + <image>:<tag> +``` + +> [!NOTE] +> +> If the image exists locally on your device, you must prefix the image name with `registry://`. For example, use +> `registry://dhi.io/python:3.13` instead of `dhi.io/python:3.13`. + +For example: + +```console +$ docker scout attest get \ + --predicate-type https://openvex.dev/ns/v0.2.0 \ + --verify \ + dhi.io/python:3.13 +``` + +If valid, Docker Scout will confirm the signature and display the signature payload, as well as the equivalent Cosign command to verify the image. + +### Sign images + +To sign a Docker image, use [Cosign](https://docs.sigstore.dev/). Replace +`<image>:<tag>` with the image name and tag. + +```console +$ cosign sign <image>:<tag> +``` + +This command will prompt you to authenticate via an OIDC provider (such as +GitHub, Google, or Microsoft). Upon successful authentication, Cosign will +generate a short-lived certificate and sign the image. The signature will be +stored in a transparency log and associated with the image in the registry. \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/slsa.md new file mode 100644 index 00000000000..ad4b254bdab --- /dev/null +++ b/content/manuals/dhi/core-concepts/slsa.md @@ -0,0 +1,105 @@ +--- +title: Supply-chain Levels for Software Artifacts (SLSA) +linktitle: SLSA +description: Learn how Docker Hardened Images comply with SLSA Build Level 3 and how to verify provenance for secure, tamper-resistant builds. +keywords: slsa docker compliance, slsa build level 3, supply chain security, verified build provenance, secure container build +--- + +## What is SLSA? + +Supply-chain Levels for Software Artifacts (SLSA) is a security framework +designed to enhance the integrity and security of software supply chains. +Developed by Google and maintained by the Open Source Security Foundation +(OpenSSF), SLSA provides a set of guidelines and best practices to prevent +tampering, improve integrity, and secure packages and infrastructure in software +projects. + +SLSA defines [four build levels (0–3)](https://slsa.dev/spec/latest/build-track-basics) of +increasing security rigor, focusing on areas such as build provenance, source +integrity, and build environment security. Each level builds upon the previous +one, offering a structured approach to achieving higher levels of software +supply chain security. + +## Why is SLSA important? + +SLSA is crucial for modern software development due to the increasing complexity +and interconnectedness of software supply chains. Supply chain attacks, such as +the SolarWinds breach, have highlighted the vulnerabilities in software +development processes. By implementing SLSA, organizations can: + +- Ensure artifact integrity: Verify that software artifacts have not been + tampered with during the build and deployment processes.
+ +- Enhance build provenance: Maintain verifiable records of how and when software + artifacts were produced, providing transparency and accountability. + +- Secure build environments: Implement controls to protect build systems from + unauthorized access and modifications. + +- Mitigate supply chain risks: Reduce the risk of introducing vulnerabilities or + malicious code into the software supply chain. + +## What is SLSA Build Level 3? + +SLSA Build Level 3, Hardened Builds, is the highest of four progressive levels in +the SLSA framework. It introduces strict requirements to ensure that software +artifacts are built securely and traceably. To meet Level 3, a build must: + +- Be fully automated and scripted to prevent manual tampering +- Use a trusted build service that enforces source and builder authentication +- Generate a signed, tamper-resistant provenance record describing how the artifact was built +- Capture metadata about the build environment, source repository, and build steps + +This level provides strong guarantees that the software was built from the +expected source in a controlled, auditable environment, which significantly +reduces the risk of supply chain attacks. + +## Docker Hardened Images and SLSA + +Docker Hardened Images (DHIs) are secure-by-default container images +purpose-built for modern production environments. Each DHI is cryptographically +signed and complies with the [SLSA Build Level 3 +standard](https://slsa.dev/spec/latest/build-track-basics#build-l3), ensuring +verifiable build provenance and integrity. + +By integrating SLSA-compliant DHIs into your development and deployment processes, you can: + +- Achieve higher security levels: Utilize images that meet stringent security + standards, reducing the risk of vulnerabilities and attacks. + +- Simplify compliance: Leverage built-in features like signed Software Bills of + Materials (SBOMs) and Vulnerability Exploitability eXchange (VEX) statements to facilitate + compliance with regulations such as FedRAMP. + +- Enhance transparency: Access detailed information about the components and + build process of each image, promoting transparency and trust. + +- Streamline audits: Utilize verifiable build records and signatures to simplify + security audits and assessments. + +## Get and verify SLSA provenance for Docker Hardened Images + +Each Docker Hardened Image (DHI) is cryptographically signed and includes +attestations. These attestations provide verifiable build provenance and +demonstrate adherence to SLSA Build Level 3 standards. + +To get and verify SLSA provenance for a DHI, you can use Docker Scout. + +```console +$ docker scout attest get dhi.io/<repository>:<tag> \ + --predicate-type https://slsa.dev/provenance/v0.2 \ + --verify +``` + +For example: + +```console +$ docker scout attest get dhi.io/node:20.19-debian12 \ + --predicate-type https://slsa.dev/provenance/v0.2 \ + --verify +``` + +## Resources + +For more details about SLSA definitions and Docker Build, see [SLSA +definitions](/build/metadata/attestations/slsa-definitions/).
\ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/sscs.md b/content/manuals/dhi/core-concepts/sscs.md new file mode 100644 index 00000000000..8606b6c377b --- /dev/null +++ b/content/manuals/dhi/core-concepts/sscs.md @@ -0,0 +1,52 @@ +--- +title: Software Supply Chain Security +linktitle: Software Supply Chain Security +description: Learn how Docker Hardened Images help secure every stage of your software supply chain with signed metadata, provenance, and minimal attack surface. +keywords: software supply chain security, secure container images, signed image provenance, docker sbom, distroless security +--- + +## What is Software Supply Chain Security (SSCS)? + +SSCS encompasses practices and strategies designed to safeguard the entire +lifecycle of software development from initial code creation to deployment and +maintenance. It focuses on securing all components. This includes code, +dependencies, build processes, and distribution channels in order to prevent +malicious actors from compromising the software supply chain. Given the +increasing reliance on open-source libraries and third-party components, +ensuring the integrity and security of these elements is paramount + +## Why is SSCS important? + +The significance of SSCS has escalated due to sophisticated cyberattacks +targeting software supply chains. High-profile supply chain attacks and the +exploitation of vulnerabilities in open-source components underscore the +critical need for robust supply chain security measures. Compromises at any +stage of the software lifecycle can lead to widespread vulnerabilities, data +breaches, and significant financial losses. + +## How Docker Hardened Images contribute to SSCS + +Docker Hardened Images (DHI) are purpose-built container images designed with +security at their core, addressing the challenges of modern software supply +chain security. By integrating DHI into your development and deployment +pipelines, you can enhance your organization's SSCS posture through the +following features: + +- Minimal attack surface: DHIs are engineered to be ultra-minimal, stripping + away unnecessary components and reducing the attack surface by up to 95%. This + distroless approach minimizes potential entry points for malicious actors. + +- Cryptographic signing and provenance: Each DHI is cryptographically signed, + ensuring authenticity and integrity. Build provenance is maintained, providing + verifiable evidence of the image's origin and build process, aligning with + standards like SLSA (Supply-chain Levels for Software Artifacts). + +- Software Bill of Materials (SBOM): DHIs include a comprehensive SBOM, + detailing all components and dependencies within the image. This transparency + aids in vulnerability management and compliance tracking, enabling teams to + assess and mitigate risks effectively. + +- Continuous maintenance and rapid CVE remediation: Docker maintains DHIs with + regular updates and security patches, backed by an [SLA for addressing critical + and high-severity vulnerabilities](https://docs.docker.com/go/dhi-sla/). This proactive approach helps ensure that + images remain secure and compliant with enterprise standards. 
diff --git a/content/manuals/dhi/core-concepts/ssdlc.md b/content/manuals/dhi/core-concepts/ssdlc.md new file mode 100644 index 00000000000..c1d2c864c16 --- /dev/null +++ b/content/manuals/dhi/core-concepts/ssdlc.md @@ -0,0 +1,113 @@ +--- +title: Secure Software Development Lifecycle +linktitle: SSDLC +description: See how Docker Hardened Images support a secure SDLC by integrating with scanning, signing, and debugging tools. +keywords: secure software development, ssdlc containers, slsa compliance, docker scout integration, secure container debugging +--- + +## What is a Secure Software Development Lifecycle? + +A Secure Software Development Lifecycle (SSDLC) integrates security practices +into every phase of software delivery, from design and development to deployment +and monitoring. It’s not just about writing secure code, but about embedding +security throughout the tools, environments, and workflows used to build and +ship software. + +SSDLC practices are often guided by compliance frameworks, organizational +policies, and supply chain security standards such as SLSA (Supply-chain Levels +for Software Artifacts) or NIST SSDF. + +## Why SSDLC matters + +Modern applications depend on fast, iterative development, but rapid delivery +often introduces security risks if protections aren’t built in early. An SSDLC +helps: + +- Prevent vulnerabilities before they reach production +- Ensure compliance through traceable and auditable workflows +- Reduce operational risk by maintaining consistent security standards +- Enable secure automation in CI/CD pipelines and cloud-native environments + +By making security a first-class citizen in each stage of software delivery, +organizations can shift left and reduce both cost and complexity. + +## How Docker supports a secure SDLC + +Docker provides tools and secure content that make SSDLC practices easier to +adopt across the container lifecycle. With [Docker Hardened +Images](../_index.md) (DHIs), [Docker +Debug](/reference/cli/docker/debug/), and [Docker +Scout](../../../manuals/scout/_index.md), teams can add security without losing +velocity. + +### Plan and design + +During planning, teams define architectural constraints, compliance goals, and +threat models. Docker Hardened Images help at this stage by providing: + +- Secure-by-default base images for common languages and runtimes +- Verified metadata including SBOMs, provenance, and VEX documents +- Support for both glibc and musl across multiple Linux distributions + +You can use DHI metadata and attestations to support design reviews, threat +modeling, or architecture sign-offs. + +### Develop + +In development, security should be transparent and easy to apply. Docker +Hardened Images support secure-by-default development: + +- Dev variants include shells, package managers, and compilers for convenience +- Minimal runtime variants reduce attack surface in final images +- Multi-stage builds let you separate build-time tools from runtime environments + +[Docker Debug](/reference/cli/docker/debug/) helps developers: + +- Temporarily inject debugging tools into minimal containers +- Avoid modifying base images during troubleshooting +- Investigate issues securely, even in production-like environments + +### Build and test + +Build pipelines are an ideal place to catch issues early. 
Docker Scout +integrates with Docker Hub and the CLI to: + +- Scan for known CVEs using multiple vulnerability databases +- Trace vulnerabilities to specific layers and dependencies +- Interpret signed VEX data to suppress known-irrelevant issues +- Export JSON scan reports for CI/CD workflows + +Build pipelines that use Docker Hardened Images benefit from: + +- Reproducible, signed images +- Minimal build surfaces to reduce exposure +- Built-in compliance with SLSA Build Level 3 standards + +### Release and deploy + +Security automation is critical as you release software at scale. Docker +supports this phase by enabling: + +- Signature verification and provenance validation before deployment +- Policy enforcement gates using Docker Scout +- Safe, non-invasive container inspection using Docker Debug + +DHIs ship with the metadata and signatures required to automate image +verification during deployment. + +### Monitor and improve + +Security continues after release. With Docker tools, you can: + +- Continuously monitor image vulnerabilities through Docker Hub +- Get CVE remediation guidance and patch visibility using Docker Scout +- Receive updated DHI images with rebuilt and re-signed secure layers +- Debug running workloads with Docker Debug without modifying the image + +## Summary + +Docker helps teams embed security throughout the SSDLC by combining secure +content (DHIs) with developer-friendly tooling (Docker Scout and Docker Debug). +These integrations promote secure practices without introducing friction, making +it easier to adopt compliance and supply chain security across your software +delivery lifecycle. \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/stig.md b/content/manuals/dhi/core-concepts/stig.md new file mode 100644 index 00000000000..63265405463 --- /dev/null +++ b/content/manuals/dhi/core-concepts/stig.md @@ -0,0 +1,133 @@ +--- +title: STIG +description: Learn how Docker Hardened Images provide STIG-ready container images with verifiable security scan attestations for government and enterprise compliance requirements. +keywords: docker stig, stig-ready images, stig guidance, openscap docker, secure container images +--- + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +## What is STIG? + +[Security Technical Implementation Guides +(STIGs)](https://public.cyber.mil/stigs/) are configuration standards published +by the U.S. Defense Information Systems Agency (DISA). They define security +requirements for operating systems, applications, databases, and other +technologies used in U.S. Department of Defense (DoD) environments. + +STIGs help ensure that systems are configured securely and consistently to +reduce vulnerabilities. They are often based on broader requirements like the +DoD's General Purpose Operating System Security Requirements Guide (GPOS SRG). + +## Why STIG guidance matters + +Following STIG guidance is critical for organizations that work with or support +U.S. government systems. It demonstrates alignment with DoD security standards +and helps: + +- Accelerate Authority to Operate (ATO) processes for DoD systems +- Reduce the risk of misconfiguration and exploitable weaknesses +- Simplify audits and reporting through standardized baselines + +Even outside of federal environments, STIGs are used by security-conscious +organizations as a benchmark for hardened system configurations. 
+ +STIGs are derived from broader NIST guidance, particularly [NIST Special +Publication 800-53](https://csrc.nist.gov/publications/sp800), which defines a +catalog of security and privacy controls for federal systems. Organizations +pursuing compliance with 800-53 or related frameworks (such as FedRAMP) can use +STIGs as implementation guides that help meet applicable control requirements. + +## How Docker Hardened Images help apply STIG guidance + +Docker Hardened Images (DHIs) include STIG variants that are scanned against +custom STIG-based profiles and include signed STIG scan attestations. These +attestations can support audits and compliance reporting. + +While Docker Hardened Images are available to all, the STIG variant requires a +Docker subscription. + +Docker creates custom STIG-based profiles for images based on the GPOS SRG and +DoD Container Hardening Process Guide. Because DISA has not published a STIG +specifically for containers, these profiles help apply STIG-like guidance to +container environments in a consistent, reviewable way and are designed to +reduce false positives common in container images. + +## Identify images that include STIG scan results + +Docker Hardened Images that include STIG scan results are labeled as **STIG** in +the Docker Hardened Images catalog. + +To find DHI repositories with STIG image variants, [explore +images](../how-to/explore.md#image-variants) and: + +- Use the **STIG** filter on the catalog page +- Look for **STIG** labels on individual image listings + +To find a STIG image variant within a repository, go to the **Tags** tab in the +repository, and find images labeled with **STIG** in the **Compliance** column. + +## Use a STIG variant + +To use a STIG variant, you must [mirror](../how-to/mirror.md) the repository +and then pull the STIG image from your mirrored repository. + +## View and verify STIG scan results + +Docker provides a signed [STIG scan +attestation](../core-concepts/attestations.md) for each STIG-ready image. 
+These attestations include: + +- A summary of the scan results, including the number of passed, failed, and not + applicable checks +- The name and version of the STIG profile used +- Full output in both HTML and XCCDF (XML) formats + +### View STIG scan attestations + +You can retrieve and inspect a STIG scan attestation using the Docker Scout CLI: + +```console +$ docker scout attest get \ + --predicate-type https://docker.com/dhi/stig/v0.1 \ + --verify \ + --predicate \ + dhi.io/<repository>:<tag> +``` + +### Extract HTML report + +To extract and view the human-readable HTML report: + +```console +$ docker scout attest get dhi.io/<repository>:<tag> \ + --predicate-type https://docker.com/dhi/stig/v0.1 \ + --verify \ + --predicate \ + | jq -r '.[0].output[] | select(.format == "html").content | @base64d' > stig_report.html +``` + +### Extract XCCDF report + +To extract the XML (XCCDF) report for integration with other tools: + +```console +$ docker scout attest get dhi.io/<repository>:<tag> \ + --predicate-type https://docker.com/dhi/stig/v0.1 \ + --verify \ + --predicate \ + | jq -r '.[0].output[] | select(.format == "xccdf").content | @base64d' > stig_report.xml +``` + +### View STIG scan summary + +To view just the scan summary without the full reports: + +```console +$ docker scout attest get dhi.io/<repository>:<tag> \ + --predicate-type https://docker.com/dhi/stig/v0.1 \ + --verify \ + --predicate \ + | jq -r '.[0] | del(.output)' +``` + + diff --git a/content/manuals/dhi/core-concepts/vex.md new file mode 100644 index 00000000000..33ffe390229 --- /dev/null +++ b/content/manuals/dhi/core-concepts/vex.md @@ -0,0 +1,57 @@ +--- +title: Vulnerability Exploitability eXchange (VEX) +linktitle: VEX +description: Learn how VEX helps you prioritize real risks by identifying which vulnerabilities in Docker Hardened Images are actually exploitable. +keywords: vex container security, vulnerability exploitability, filter false positives, docker scout vex, cve prioritization +--- + +## What is VEX? + +Vulnerability Exploitability eXchange (VEX) is a specification for documenting +the exploitability status of vulnerabilities within software components. VEX is +primarily defined through industry standards such as CSAF (OASIS) and CycloneDX +VEX, with the U.S. Cybersecurity and Infrastructure Security Agency (CISA) +encouraging its adoption. VEX complements CVE (Common Vulnerabilities and +Exposures) identifiers by adding producer-asserted status information, +indicating whether a vulnerability is exploitable in the product as shipped. +This helps organizations prioritize remediation efforts by identifying +vulnerabilities that do not affect their specific product configurations. + +For how VEX affects vulnerability counts and scanner selection, see [Scanner +integrations](/manuals/dhi/explore/scanner-integrations.md). To scan a DHI with +VEX support, see [Scan Docker Hardened Images](/manuals/dhi/how-to/scan.md). + +## VEX status reference + +Each VEX statement includes a `status` field that records Docker's +exploitability assessment for a given CVE and image. DHI uses three of the four +OpenVEX status values.
+ +| Status | Meaning | +|---|---| +| `not_affected` | The CVE was reported against a package in the image, but Docker has assessed it is not exploitable as shipped | +| `under_investigation` | Docker is aware of the CVE and is actively evaluating whether it affects the image | +| `affected` | Docker has confirmed the CVE is exploitable in the image and a fix is not yet available | + +You can view the VEX statements for any DHI using Docker Scout. See [Scan Docker +Hardened Images](/manuals/dhi/how-to/scan.md). + +### `not_affected` justification codes + +`not_affected` statements include a machine-readable `justification` field +explaining why the vulnerability does not apply: + +| Justification | Meaning | +|---|---| +| `component_not_present` | The vulnerable component is not present in this image; the CVE matched by name against a different package | +| `vulnerable_code_not_present` | The vulnerable code path was not compiled into this build | +| `vulnerable_code_not_in_execute_path` | The vulnerable code exists in the package but is not called in this image's runtime configuration | +| `vulnerable_code_cannot_be_controlled_by_adversary` | The vulnerable code exists but an attacker cannot trigger it in this configuration | +| `inline_mitigations_already_exist` | Docker has applied a backport or patch that addresses the CVE | + +### Why DHI does not use `fixed` + +DHI does not use `fixed`. VEX-enabled scanners may not handle `fixed` +consistently, so when Docker backports an upstream patch where the version +number alone would not reflect the fix, it uses `not_affected` with +`inline_mitigations_already_exist` justification instead. diff --git a/content/manuals/dhi/explore/_index.md b/content/manuals/dhi/explore/_index.md new file mode 100644 index 00000000000..151f73e4fe0 --- /dev/null +++ b/content/manuals/dhi/explore/_index.md @@ -0,0 +1,58 @@ +--- +linktitle: Explore +title: Explore Docker Hardened Images +description: Learn about Docker Hardened Images, their purpose, how they are built and tested, and the shared responsibility model for security. +weight: 10 +params: + grid_about: + - title: What are hardened images and why use them? + description: Learn what a hardened image is, how Docker Hardened Images are built, what sets them apart from typical base and application images, and why you should use them. + icon: info + link: /dhi/explore/what/ + - title: Build process + description: Learn how Docker builds, tests, and maintains Docker Hardened Images through an automated, security-focused pipeline. + icon: build + link: /dhi/explore/build-process/ + - title: Image types + description: Learn about the different image types, distributions, and variants offered in the Docker Hardened Images catalog. + icon: view_module + link: /dhi/explore/available/ + - title: Scanner integrations + description: Discover which vulnerability scanners integrate with Docker Hardened Images and support open standards like OpenVEX. + icon: security + link: /dhi/explore/scanner-integrations/ + - title: Image testing + description: See how Docker Hardened Images are automatically tested for standards compliance, functionality, and security. + icon: science + link: /dhi/explore/test/ + - title: Malware scanning + description: Learn how Docker scans Docker Hardened Images for viruses and malware, and how to view and verify the scan attestation. 
+ icon: bug_report + link: /dhi/explore/malware-scanning/ + - title: Responsibility overview + description: Understand Docker's role and your responsibilities when using Docker Hardened Images as part of your secure software supply chain. + icon: group + link: /dhi/explore/responsibility/ + - title: Give feedback + icon: question_exchange + description: Docker welcomes all contributions and feedback. + link: /dhi/explore/feedback +aliases: + - /dhi/about/ +--- + +Docker Hardened Images (DHI) are minimal, secure, and production-ready container +base and application images maintained by Docker. Designed to reduce +vulnerabilities and simplify compliance, DHI integrates easily into your +existing Docker-based workflows with little to no retooling required. + +This section helps you understand what Docker Hardened Images are, how they're +built and tested, the different types available, and how responsibility is +shared between Docker and you as a user. For a complete list of DHI features and +capabilities, see [Features](/dhi/features/). + +## Learn more about Docker Hardened Images + +{{< grid + items="grid_about" +>}} diff --git a/content/manuals/dhi/explore/available.md b/content/manuals/dhi/explore/available.md new file mode 100644 index 00000000000..9e6356ba256 --- /dev/null +++ b/content/manuals/dhi/explore/available.md @@ -0,0 +1,135 @@ +--- +linktitle: Image types +title: Available types of Docker Hardened Images +description: Learn about the different image types, distributions, and variants offered in the Docker Hardened Images catalog. +keywords: docker hardened images, distroless containers, distroless images, docker distroless, alpine base image, debian base image, development containers, runtime containers, secure base image, multi-stage builds +weight: 20 +aliases: + - /dhi/about/available/ +--- + +Docker Hardened Images (DHI) is a comprehensive catalog of +security-hardened container images built to meet diverse +development and production needs. + +You can explore the DHI catalog on [Docker Hub](https://hub.docker.com/search?q=&image_filter=store%2Cdhi) or use the [DHI CLI](../how-to/cli.md) to browse +available images, tags, and metadata from the command line. + +## Framework and application images + +DHI includes a selection of popular frameworks and application images, each +hardened and maintained to ensure security and compliance. These images +integrate seamlessly into existing workflows, allowing developers to focus on +building applications without compromising on security. + +For example, you might find repositories like the following in the DHI catalog: + +- `node`: framework for Node.js applications +- `python`: framework for Python applications +- `nginx`: web server image + +## Base image distributions + +Docker Hardened Images are available in different base image options, giving you +flexibility to choose the best match for your environment and workload +requirements: + +- Debian-based images: A good fit if you're already working in glibc-based + environments. Debian is widely used and offers strong compatibility across + many language ecosystems and enterprise systems. + +- Alpine-based images: A smaller and more lightweight option using musl libc. + These images tend to be small and are therefore faster to pull and have a + reduced footprint. + +Each image maintains a minimal and secure runtime layer by removing +non-essential components like shells, package managers, and debugging tools. 
+This helps reduce the attack surface while retaining compatibility with common +runtime environments. To maintain this lean, secure foundation, DHI standardizes +on Debian for glibc-based images, which provides broad compatibility while +minimizing complexity and maintenance overhead. + +Example tags include: + +- `3.9.23-alpine3.21`: Alpine-based image for Python 3.9.23 +- `3.9.23-debian12`: Debian-based image for Python 3.9.23 + +If you're not sure which to choose, start with the base you're already familiar +with. Debian tends to offer the broadest compatibility. + +## Development and runtime variants + +To accommodate different stages of the application lifecycle, DHI offers all +language framework images and select application images in two variants: + +- Development (dev) images: Equipped with necessary development tools and +libraries, these images facilitate the building and testing of applications in a +secure environment. They include a shell, package manager, a root user, and +other tools needed for development. + +- Runtime images: Stripped of development tools, these images contain only the +essential components needed to run applications, ensuring a minimal attack +surface in production. + +This separation supports multi-stage builds, enabling developers to compile code +in a secure build environment and deploy it using a lean runtime image. + +For example, you might find tags like the following in a DHI repository: + +- `3.9.23-debian12`: runtime image for Python 3.9.23 +- `3.9.23-debian12-dev`: development image for Python 3.9.23 + +## FIPS and STIG variants + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +Some Docker Hardened Images include a `-fips` variant. These variants use +cryptographic modules that have been validated under [FIPS +140](../core-concepts/fips.md), a U.S. government standard for secure +cryptographic operations. + +FIPS variants are designed to help organizations meet regulatory and compliance +requirements related to cryptographic use in sensitive or regulated +environments. + +You can recognize FIPS variants by their tag that includes `-fips`. + +For example: +- `3.13-fips`: FIPS variant of the Python 3.13 image +- `3.9.23-debian12-fips`: FIPS variant of the Debian-based Python 3.9.23 image + +FIPS variants can be used in the same way as any other Docker Hardened Image and +are ideal for teams operating in regulated industries or under compliance +frameworks that require cryptographic validation. + +In addition to FIPS variants, some Docker Hardened Images also include +STIG-ready variants. These images are scanned against custom STIG-based +profiles and come with signed STIG scan attestations to support audits and +compliance reporting. To identify STIG-ready variants, look for the **STIG** label +in the **Compliance** column of the image tags list in the Docker Hub catalog. + +## Compatibility variants + +Some Docker Hardened Images include a compatibility variant. These variants +provide additional tools and configurations for specific use cases without +bloating the minimal base images. + +Compatibility variants are created to support: + +- Helm chart compatibility: Applications deployed via Helm charts and + Kubernetes that require specific runtime configurations or utilities for + seamless integration with popular Helm charts. + +- Special application use-cases: Applications that need optional tools not + included in the minimal image.
+ +By offering these as separate image flavors, DHI ensures that the minimal images +remain lean and secure, while providing the tools you need in dedicated +variants. This approach maintains a minimal attack surface for standard +deployments while supporting specialized requirements when needed. + +You can recognize compatibility variants by their tag that includes `-compat`. + +Use compatibility variants when your deployment requires additional tools beyond +the minimal runtime, such as when using Helm charts or applications with +specific tooling requirements. diff --git a/content/manuals/dhi/explore/build-process.md b/content/manuals/dhi/explore/build-process.md new file mode 100644 index 00000000000..01cd6541eb7 --- /dev/null +++ b/content/manuals/dhi/explore/build-process.md @@ -0,0 +1,189 @@ +--- +title: How Docker Hardened Images are built +linkTitle: Build process +description: Learn how Docker builds, tests, and maintains Docker Hardened Images through an automated, security-focused pipeline. +keywords: docker hardened images, slsa build level 3, automated patching, ai guardrail, build process, signed sbom, supply chain security +weight: 15 +aliases: + - /dhi/about/build-process/ +--- + +Docker Hardened Images are built through an automated pipeline that monitors +upstream sources, applies security updates, and publishes signed artifacts. +This page explains the build process for both base DHI images and customized +images available with DHI Select and DHI Enterprise subscriptions. + +With DHI Select or DHI Enterprise subscriptions, the automated security update pipeline for +both base and customized images is backed by [SLA commitments](https://docs.docker.com/go/dhi-sla/), including a 7-day +SLA for critical and high severity vulnerabilities. DHI Community offers a secure baseline +but no guaranteed remediation timelines. + +## Build transparency + +Docker Hardened Images provide transparency into how images are built through +publicly available definitions and verifiable attestations. + +### Image definitions + +All image definitions are publicly available in the [catalog +repository](https://github.com/docker-hardened-images/catalog). + +Each image definition is a declarative YAML specification that includes metadata, +contents, build pipeline steps, security configurations, and runtime settings. + +### SLSA attestations + +Every Docker Hardened Image includes a SLSA Build Level 3 attestation that +provides verifiable build provenance. For details on SLSA attestations and how to +verify them, see [SLSA](../core-concepts/slsa.md). + +## Build triggers + +Builds start automatically. You don't trigger them manually. The system monitors +for changes and starts builds in two scenarios: + +- [Upstream updates](#upstream-updates) +- [Customization changes](#customization-changes) + +### Upstream updates + +New releases, package updates, or CVE fixes from upstream projects trigger base +image rebuilds. These builds go through quality checks to ensure security and +reliability. + +#### Monitoring for updates + +Docker continuously monitors upstream projects for new releases, package +updates, and security advisories. When changes are detected, the system +automatically queues affected images for rebuild using a SLSA Build Level +3-compliant build system. + +Docker uses three strategies to track updates: + +- GitHub releases: Monitors specific GitHub repositories for new releases and + automatically updates the image definition when a new version is published. 
+- GitHub tags: Tracks tags in GitHub repositories to detect new versions. +- Package repositories: Monitors Alpine Linux, Debian, and Ubuntu package + repositories through Docker Scout's package database to detect updated + packages. + +In addition to explicit upstream tracking, Docker also monitors transitive +dependencies. When a package update is detected (for example, a security patch +for a library), Docker automatically identifies and rebuilds all images within +the support window that use that package. + +### Customization changes + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +Updates to your OCI artifact customizations trigger rebuilds of your customized +images. + +When you customize a DHI image with DHI Select or DHI Enterprise, your changes are packaged as +OCI artifacts that layer on top of the base image. Docker monitors your artifact +repositories and automatically rebuilds your customized images whenever you push +updates. + +The rebuild process fetches the current base image, applies your OCI artifacts, +signs the result, and publishes it automatically. You don't need to manage +builds or maintain CI pipelines for your customized images. + +Customized images are also rebuilt automatically when the base DHI image they +depend on receives updates, ensuring your images always include the latest +security patches. + +## Build pipeline + +The following sections describe the build pipeline architecture and workflow for +Docker Hardened Images based on: + +- [Base image pipeline](#base-image-pipeline) +- [Customized image pipeline](#customized-image-pipeline) + +### Base image pipeline + +Each Docker Hardened Image is built through an automated pipeline: + +1. Monitoring: Docker monitors upstream sources for updates (new releases, + package updates, security advisories). +2. Rebuild trigger: When changes are detected, an automated rebuild starts. +3. AI guardrail: An AI system fetches upstream diffs and scans them with + language-aware checks. The guardrail focuses on high-leverage issues that can + cause significant problems, such as inverted error checks, ignored failures, + resource mishandling, or suspicious contributor activity. When it spots + potential risks, it blocks the PR from auto-merging. +4. Human review: If the AI identifies risks with high confidence, + Docker engineers review the flagged code, reproduce the issue, and decide on + the appropriate action. Engineers often contribute fixes back to upstream + projects, improving the code for the entire community. When fixes are accepted + upstream, the DHI build pipeline applies the patch immediately to protect + customers while the fix moves through the upstream release process. +5. Testing and scanning: Images undergo comprehensive + [testing](test.md) for compatibility and functionality, and are + [scanned for malware](malware-scanning.md), secrets, and + vulnerabilities. +6. Signing and attestations: Docker signs each image and generates + attestations (SBOMs, VEX documents, build provenance). +7. Publishing: The signed image is published to the DHI registry and the + attestations are published to the Docker Scout registry. +8. Cascade rebuilds: If any customized images use this base, their rebuilds + are automatically triggered. + +Docker responds quickly to critical vulnerabilities. 
By building essential +components from source rather than waiting for packaged updates, Docker can +patch critical and high severity CVEs within days of upstream fixes and publish +updated images with new attestations. For DHI Enterprise subscriptions, this +rapid response is backed by a [7-day SLA for critical and high severity +vulnerabilities](https://docs.docker.com/go/dhi-sla/). + +The following diagram shows the base image build flow: + +```goat {class="text-sm"} +.-------------------. .-------------------. .-------------------. .-------------------. +| Docker monitors |----->| Trigger rebuild |----->| AI guardrail |----->| Human review | +| upstream sources | | | | scans changes | | | +'-------------------' '-------------------' '-------------------' '-------------------' + | + v +.-------------------. .-------------------. .-------------------. .-------------------. +| Cascade rebuilds |<-----| Publish to |<-----| Sign & generate |<-----| Testing & | +| (if needed) | | DHI registry | | attestations | | scanning | +'-------------------' '-------------------' '-------------------' '-------------------' +``` + +### Customized image pipeline + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +When you customize a DHI image with DHI Select or DHI Enterprise, the build process is simplified: + +1. Monitoring: Docker monitors your OCI artifact repositories for changes. +2. Rebuild trigger: When you push updates to your OCI artifacts, or when the base + DHI image is updated, an automated rebuild starts. +3. Fetch base image: The latest base DHI image is fetched. +4. Apply customizations: Your OCI artifacts are applied to the base image. +5. Scanning: The customized image is [scanned for + malware](malware-scanning.md), secrets, and vulnerabilities. +6. Signing and attestations: Docker signs the customized image and generates + attestations (SBOMs, VEX documents, build provenance). +7. Publishing: The signed customized image is published to Docker Hub and the + attestations are published to the Docker Scout registry. + +Docker handles the entire process automatically, so you don't need to manage +builds for your customized images. However, you're responsible for testing your +customized images and managing any CVEs introduced by your OCI artifacts. + +The following diagram shows the customized image build flow: + +```goat {class="text-sm"} +.-------------------. .-------------------. .-------------------. .-------------------. +| Docker monitors |----->| Trigger rebuild |----->| Fetch base |----->| Apply | +| OCI artifacts | | | | DHI image | | customizations | +'-------------------' '-------------------' '-------------------' '-------------------' + | + v + .-------------------. .-------------------. .-------------------. 
+                           | Publish to        |<-----| Sign & generate   |<-----| Scanning          |
+                           | Docker Hub        |      | attestations      |      |                   |
+                           '-------------------'      '-------------------'      '-------------------'
+```
diff --git a/content/manuals/dhi/explore/feedback.md b/content/manuals/dhi/explore/feedback.md
new file mode 100644
index 00000000000..48fa9183368
--- /dev/null
+++ b/content/manuals/dhi/explore/feedback.md
@@ -0,0 +1,43 @@
+---
+title: Give feedback
+linkTitle: Feedback
+description: How to interact with the DHI team
+keywords: software supply chain security, feedback, bugs, discussion, questions
+weight: 999
+aliases:
+  - /dhi/about/feedback/
+---
+
+Docker is committed to maintaining the quality, security, and reliability of the
+Docker Hardened Images (DHI) ecosystem. To encourage the community to collaborate
+in improving it, Docker maintains a repository that serves as the point of contact for feedback.
+
+## Questions or discussions
+
+You can use the [GitHub Discussions
+board](https://github.com/orgs/docker-hardened-images/discussions) to engage
+with the DHI team for:
+
+- General questions about DHIs
+- Best practices and recommendations
+- Security tips and advice
+- Show and tell your implementations
+- Community announcements
+
+## Reporting bugs or issues
+
+You can [open a new issue](https://github.com/docker-hardened-images/catalog/issues) for topics such as:
+
+- Bug reports
+- Feature requests
+- Documentation improvements
+- Security vulnerabilities (see security policy)
+
+Before opening a new issue, search existing issues to check whether the topic has already been reported.
+The DHI team reviews reports regularly and appreciates clear, actionable feedback.
+
+## Responsible security disclosure
+
+Do not post details of vulnerabilities publicly before coordinated disclosure and resolution.
+
+If you discover a security vulnerability, report it responsibly by following Docker's [security disclosure policy](https://www.docker.com/trust/vulnerability-disclosure-policy/).
diff --git a/content/manuals/dhi/explore/malware-scanning.md b/content/manuals/dhi/explore/malware-scanning.md
new file mode 100644
index 00000000000..60ab4c1b474
--- /dev/null
+++ b/content/manuals/dhi/explore/malware-scanning.md
@@ -0,0 +1,94 @@
+---
+title: Malware scanning
+description: Learn how Docker scans Docker Hardened Images for viruses and malware using ClamAV, and how to view and verify the scan attestation.
+keywords: docker hardened images, malware scanning, virus scan, clamav, attestation, image security
+weight: 50
+---
+
+The Docker Hardened Image (DHI) pipeline scans for viruses and malware as part
+of the build process. The scan results are embedded as a signed attestation,
+which you can independently retrieve and verify.
+
+## How it works
+
+Docker uses [ClamAV](https://www.clamav.net/), an open source antivirus engine,
+to scan every layer of each image. The scan runs automatically during the build
+process and checks all files in the image, including files inside archives, for
+known viruses and malware signatures.
+
+The scan results are published as a signed attestation attached to the image.
+The attestation includes the full ClamAV scan report, including the number of
+files scanned, the virus signature database version, and whether any infected
+files were detected.
+
+## View the malware scan attestation
+
+You can retrieve the malware scan attestation using the Docker Scout CLI.
+
+1. Use the `docker scout attest get` command with the virus scan predicate type:
+
+   ```console
+   $ docker scout attest get \
+     --predicate-type https://scout.docker.com/virus/v0.1 \
+     --predicate \
+     dhi.io/<image>:<tag>
+   ```
+
+   > [!NOTE]
+   >
+   > If the image exists locally on your device, you must prefix the image name
+   > with `registry://`. For example, use `registry://dhi.io/python` instead of
+   > `dhi.io/python`.
+
+   For example:
+
+   ```console
+   $ docker scout attest get \
+     --predicate-type https://scout.docker.com/virus/v0.1 \
+     --predicate \
+     dhi.io/python:3.13
+   ```
+
+   The output is a JSON object containing the scanner used and the base64-encoded
+   scan report:
+
+   ```json
+   {
+     "scanner": {
+       "report": "<base64-encoded scan report>",
+       "uri": "clamav/clamav:stable"
+     }
+   }
+   ```
+
+   Decoding the report shows the full ClamAV output, ending with a scan summary:
+
+   ```text
+   ----------- SCAN SUMMARY -----------
+   Known viruses: 3627833
+   Engine version: 1.5.2
+   Scanned directories: 4
+   Scanned files: 21
+   Infected files: 0
+   Data scanned: 44.90 MiB
+   Data read: 23.88 MiB (ratio 1.88:1)
+   Time: 11.473 sec (0 m 11 s)
+   Start Date: 2026:04:12 02:36:19
+   End Date: 2026:04:12 02:36:30
+   ```
+
+2. Verify the attestation signature. To ensure the attestation is authentic and
+   signed by Docker, run:
+
+   ```console
+   $ docker scout attest get \
+     --predicate-type https://scout.docker.com/virus/v0.1 \
+     --verify \
+     dhi.io/<image>:<tag> --platform <platform>
+   ```
+
+   If the attestation is valid, Docker Scout confirms the signature and shows
+   the matching `cosign verify` command.
+
+To view other attestations, such as SBOMs or test results, see [Verify
+an image](../how-to/verify.md).
diff --git a/content/manuals/dhi/explore/responsibility.md b/content/manuals/dhi/explore/responsibility.md
new file mode 100644
index 00000000000..e4e816dd032
--- /dev/null
+++ b/content/manuals/dhi/explore/responsibility.md
@@ -0,0 +1,83 @@
+---
+title: Understanding roles and responsibilities for Docker Hardened Images
+linkTitle: Responsibility overview
+description: Understand the division of responsibilities between Docker, upstream projects, and you when using Docker Hardened Images.
+keywords: software supply chain security, signed sbom, vex document, container provenance, image attestation
+weight: 46
+aliases:
+  - /dhi/about/responsibility/
+---
+
+Docker Hardened Images (DHIs) are curated and maintained by Docker, and built
+using upstream open source components. To deliver security, reliability, and
+compliance, responsibilities are shared among three groups:
+
+- Upstream maintainers: the developers and communities responsible for the
+  open source software included in each image.
+- Docker: the provider of hardened, signed, and maintained container images.
+- You (the customer): the consumer who runs and, optionally, customizes DHIs
+  in your environment.
+
+This topic outlines who handles what, so you can use DHIs effectively and
+securely.
+
+## Releases
+
+- Upstream: Publishes and maintains official releases of the software
+  components included in DHIs. This includes versioning, changelogs, and
+  deprecation notices.
+- Docker: Builds, hardens, and signs Docker Hardened Images based on
+  upstream versions. Docker maintains these images in line with upstream release
+  timelines and internal policies.
+- You: Ensure you stay on supported versions of DHIs and upstream
+  projects. Using outdated or unsupported components can introduce security
+  risk.
+ +## Patching + +- Upstream: Maintains and updates the source code for each component, + including fixing vulnerabilities in libraries and dependencies. +- Docker: Rebuilds and re-releases images with upstream patches applied. Docker + monitors for vulnerabilities and publishes updates to affected images. DHI Select + and DHI Enterprise include [SLA commitments](https://docs.docker.com/go/dhi-sla/). DHI Community offers a secure baseline but no + guaranteed remediation timelines. +- You: Apply DHI updates in your environments and patch any software or + dependencies you install on top of the base image. + +## Testing + +- Upstream: Defines the behavior and functionality of the original software, + and is responsible for validating core features. +- Docker: Validates that DHIs start, run, and behave consistently with + upstream expectations. Docker also runs security scans and includes a [testing + attestation](../core-concepts/attestations.md) with each image. +- You: Test your application on top of DHIs and validate that any changes or + customizations function as expected in your environment. + +## Security and compliance + +- Docker: Publishes signed SBOMs, VEX documents, provenance data, and CVE + scan results with each image to support compliance and supply chain security. + - For DHI Community users: All security metadata and transparency features are + included at no cost. + - For DHI Select and Enterprise users: Additional compliance variants (like FIPS and + STIG) and customization capabilities are available, with automatic rebuilds + when base images are patched. +- You: Integrate DHIs into your security and compliance workflows, including + vulnerability management and auditing. + +## Support + +- Docker: + - For DHI Community users: Community support and public documentation are available. + - For DHI Select and DHI Enterprise users: Access to Docker's enterprise + support team for mission-critical applications. +- You: Monitor Docker's release notes, security advisories, and documentation + for updates and best practices. + +## Summary + +Docker Hardened Images give you a secure foundation, complete with signed +metadata and upstream transparency. Your role is to make informed use of these +images, apply updates promptly, and validate that your configurations and +applications meet your internal requirements. \ No newline at end of file diff --git a/content/manuals/dhi/explore/scanner-integrations.md b/content/manuals/dhi/explore/scanner-integrations.md new file mode 100644 index 00000000000..b4df4245dc2 --- /dev/null +++ b/content/manuals/dhi/explore/scanner-integrations.md @@ -0,0 +1,174 @@ +--- +title: Scanner integrations +description: Learn which vulnerability scanners work with Docker Hardened Images and how to choose the right scanner for accurate vulnerability assessment. +keywords: scanner integration, vulnerability scanning, docker scout, trivy, grype, mend.io, container security scanners +weight: 40 +--- + +Docker Hardened Images work with various vulnerability scanners. However, to get +accurate results that reflect the actual security posture of these images, your +scanner needs to understand the VEX (Vulnerability Exploitability eXchange) +attestations included with each image. + +## Scanners with VEX support + +The following scanners can read and apply VEX attestations included with Docker +Hardened Images to deliver more accurate vulnerability assessments: + +- [Docker Scout](/scout/): Automatically applies VEX statements with + zero configuration. 
Integrated directly into Docker Desktop and the Docker CLI. +- [Trivy](https://trivy.dev/): Supports VEX through VEX Hub for automatic + updates or local VEX files for air-gapped environments. +- [Grype](https://github.com/anchore/grype): Supports VEX via the `--vex` + flag for local VEX file processing. +- [Wiz](https://www.wiz.io/): Automatically applies VEX statements with + zero configuration. +- [Mend.io](https://www.mend.io/): Automatically retrieves and applies VEX + statements with zero configuration. Combines VEX data with reachability + analysis. + +For step-by-step instructions, see [Scan Docker Hardened Images](/manuals/dhi/how-to/scan.md). + +## Choosing a scanner for Docker Hardened Images + +When selecting a scanner for use with Docker Hardened Images, whether it +supports open standards like OpenVEX is the key differentiator. + +Docker Hardened Images include signed VEX attestations that follow the +[OpenVEX standard](https://openvex.dev/). OpenVEX is an open standard that meets +the minimum requirements for VEX defined by CISA (Cybersecurity and +Infrastructure Security Agency), the U.S. government agency responsible for +cybersecurity guidance. These attestations document which vulnerabilities don't +apply to the image and why, helping you focus on real risks. To understand what +VEX is and how it works, see the [VEX core concept](/manuals/dhi/core-concepts/vex.md). + +Because OpenVEX is an open standard with government backing, it has strong +industry momentum and any tool can implement it without vendor-specific +integrations. This matters when you bring in third-party auditors with their own +scanning tools, or when you want to use multiple security tools in your +pipeline. With VEX, these tools can all read and verify the same vulnerability +data directly from your images. + +Without open standards like VEX, vendors make exploitability decisions using +proprietary methods, making it difficult to verify claims or compare results +across tools. This fragments your security toolchain and creates inconsistent +vulnerability assessments across different scanning tools. + +### Benefits of scanners with VEX support + +Scanners that support open standards like OpenVEX and can interpret VEX attestations +from Docker Hardened Images offer the following benefits: + +- Accurate vulnerability counts: Automatically filter out vulnerabilities + that don't apply to your specific image, often reducing false positives + dramatically. +- Transparency and auditability: Verify exactly why vulnerabilities are or + aren't flagged; security teams and compliance officers can review the reasoning + rather than trusting a vendor's black box. +- Scanner flexibility: Switch between any VEX-enabled scanner (Docker Scout, + Trivy, Grype, Wiz, Mend.io, etc.) without losing vulnerability context or + rebuilding exclusion lists. +- Consistent results: VEX-enabled scanners interpret the same data the + same way, eliminating discrepancies between tools. +- Faster workflows: Focus on real risks rather than researching why reported + CVEs don't actually affect your deployment. + +### Scanners without VEX support + +Scanners that can't read VEX attestations will report vulnerabilities that don't +apply to Docker Hardened Images. This creates operational challenges: + +- Manual filtering required: You'll need to maintain scanner-specific ignore + lists to replicate what VEX statements already document. 
+- Higher false positive rates: Expect to see more reported vulnerabilities + that don't represent real risks. +- Increased investigation time: Security teams spend time researching why + CVEs don't apply instead of addressing actual vulnerabilities. With Docker + Hardened Images, security experts at Docker manage this investigation for + you, thoroughly vetting each justification before adding it to a VEX statement. +- CI/CD friction: Build pipelines may fail on vulnerabilities that aren't + exploitable in your images. + +### VEX-based vulnerability handling versus proprietary approaches + +Docker Hardened Images use VEX attestations based on the OpenVEX open standard to document vulnerability exploitability. OpenVEX is an open standard that is recognized by government agencies such as CISA. This open standards approach differs from how some other image vendors handle vulnerabilities using proprietary methods. + +#### Docker Hardened Images with VEX + +The image includes signed attestations that explain which vulnerabilities don't +apply and why. Any VEX-enabled scanner can read these attestations, giving you: + +- Tool flexibility: Use any scanner that supports OpenVEX (Docker Scout, + Trivy, Grype, Wiz, Mend.io, etc.) +- Complete transparency: Review the exact reasoning for each vulnerability + assessment +- Full auditability: Security teams and compliance officers can independently + verify all vulnerability assessments and reasoning +- Historical visibility: VEX statements remain with the image, so you can + always check vulnerability status, even for older versions + +#### Proprietary vulnerability handling + +Some image vendors use proprietary advisory feeds or internal databases instead +of VEX. While this may result in fewer reported vulnerabilities, it creates +significant limitations: + +- Tool dependency: You must use the vendor's preferred scanning tools to see + their vulnerability filtering, while standard scanners will still report all + CVEs; scanners must implement proprietary feeds rather than using open + standards that work with all images +- No transparency: Proprietary feeds act as "black boxes" - vulnerabilities + simply disappear from vendor tools with no explanation +- Limited verifiability: Security teams have no way to independently verify + why vulnerabilities are excluded or whether the reasoning is sound +- Maintenance challenges: If you scan older image versions with standard + tools, you can't determine which vulnerabilities actually applied at that + time, making long-term security tracking difficult +- Ecosystem incompatibility: Your existing security tools (SCA, policy + engines, compliance scanners) can't access or verify the vendor's proprietary + vulnerability data + +The fundamental difference: VEX-based approaches explain vulnerability +assessments using open standards that any tool can verify and audit. Proprietary +approaches hide vulnerabilities in vendor-specific systems where the reasoning +can't be independently validated. + +For Docker Hardened Images, use VEX-enabled scanners to get accurate results +that work across your entire security toolchain. + +## What to expect from different scanners + +When scanning Docker Hardened Images with different tools, you'll see +significant differences in reported vulnerability counts. 
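+
+As a rough illustration of that difference (a sketch rather than exact output),
+assume you have already exported the image's OpenVEX statement to a local file
+named `vex.json`, as described in [Scan Docker Hardened
+Images](/manuals/dhi/how-to/scan.md). Scanning with and without that file shows
+how much of the raw CVE list VEX filtering removes; the exact counts vary by
+image and scanner database version:
+
+```console
+# Without VEX: every CVE found in the image's packages is reported
+$ grype dhi.io/python:3.13
+
+# With the image's VEX statement applied: findings documented as not
+# exploitable are filtered out of the results
+$ grype dhi.io/python:3.13 --vex vex.json
+```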
+ +### What VEX-enabled scanners filter automatically + +When you scan Docker Hardened Images with VEX-enabled scanners, they +automatically exclude vulnerabilities that don't apply: + +- Hardware-specific vulnerabilities: Issues that only affect specific + hardware architectures (for example, Power10 processors) that are irrelevant to + containerized workloads. +- Unreachable code paths: CVEs in code that exists in the package but isn't + executed in the image's runtime configuration. +- Build-time only issues: Vulnerabilities in build tools or dependencies + that don't exist in the final runtime image. +- Temporary identifiers: Placeholder vulnerability IDs (like Debian's + `TEMP-xxxxxxx`) that aren't intended for external tracking. + +### Using scanners without VEX support + +If your scanner doesn't support VEX, you'll need to manually exclude +vulnerabilities through scanner-specific mechanisms like ignore lists or policy +exceptions. This requires: + +- Reviewing VEX statements from Docker Hardened Images +- Translating VEX justifications into your scanner's format +- Maintaining these exclusions as new vulnerabilities are discovered +- Repeating this process if you switch scanners or add additional scanning tools + +## What's next + +Learn how to [scan Docker Hardened Images](/manuals/dhi/how-to/scan.md) with +VEX-compliant scanners. + diff --git a/content/manuals/dhi/explore/test.md b/content/manuals/dhi/explore/test.md new file mode 100644 index 00000000000..a8c56f27974 --- /dev/null +++ b/content/manuals/dhi/explore/test.md @@ -0,0 +1,155 @@ +--- +title: How Docker Hardened Images are tested +linktitle: Image testing +description: See how Docker Hardened Images are automatically tested for standards compliance, functionality, and security. +keywords: docker scout, test attestation, cosign verify, image testing, vulnerability scan +weight: 45 +aliases: + - /dhi/about/test/ +--- + +Docker Hardened Images (DHIs) are designed to be secure, minimal, and +production-ready. To ensure their reliability and security, Docker employs a +comprehensive testing strategy, which you can independently verify using signed +attestations and open tooling. + +Every image is tested for standards compliance, functionality, and security. The +results of this testing are embedded as signed attestations, which can be +[inspected and verified](#view-and-verify-the-test-attestation) programmatically +using the Docker Scout CLI. + +## Testing strategy overview + +The testing process for DHIs focuses on two main areas: + +- Image standards compliance: Ensuring that each image adheres to strict size, + security, and compatibility standards. +- Application functionality: Verifying that applications within the images + function correctly. + +## Image standards compliance + +Each DHI undergoes rigorous checks to meet the following standards: + +- Minimal attack surface: Images are built to be as small as possible, removing + unnecessary components to reduce potential vulnerabilities. +- Near-zero known CVEs: Images are scanned using tools like Docker Scout to + ensure they are free from known Common Vulnerabilities and Exposures (CVEs). +- Multi-architecture support: DHIs are built for multiple architectures + (`linux/amd64` and `linux/arm64`) to ensure broad compatibility. +- Kubernetes compatibility: Images are tested to run seamlessly within + Kubernetes clusters, ensuring they meet the requirements for container + orchestration environments. 
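+
+As a quick check of the multi-architecture support listed above, you can inspect
+the manifest list published for an image tag. This is a minimal example using
+the Python image, and it assumes you have already run `docker login dhi.io`:
+
+```console
+$ docker buildx imagetools inspect dhi.io/python:3.13
+```
+
+The output lists a manifest per supported platform, including `linux/amd64` and
+`linux/arm64`.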
+
+## Application functionality testing
+
+Docker tests Docker Hardened Images to ensure they behave as expected in typical
+usage scenarios. This includes verifying that:
+
+- Applications start and run successfully in containerized environments.
+- Runtime behavior aligns with upstream expectations.
+- Build variants (like `-dev` images) support common development and build tasks.
+
+The goal is to ensure that DHIs work out of the box for the most common use
+cases while maintaining the hardened, minimal design.
+
+## Automated testing and CI/CD integration
+
+Docker integrates automated testing into its Continuous Integration/Continuous
+Deployment (CI/CD) pipelines:
+
+- Automated scans: Each image build triggers automated scans for vulnerabilities
+  and compliance checks.
+- Reproducible builds: Build processes are designed to be reproducible, ensuring
+  consistency across different environments.
+- Continuous monitoring: Docker continuously monitors for new vulnerabilities
+  and updates images accordingly to maintain security standards.
+
+## Testing attestation
+
+Docker provides a test attestation that details the testing and validation
+processes each DHI has undergone.
+
+### View and verify the test attestation
+
+You can view and verify this attestation using the Docker Scout CLI.
+
+1. Use the `docker scout attest get` command with the test predicate type:
+
+   ```console
+   $ docker scout attest get \
+     --predicate-type https://scout.docker.com/tests/v0.1 \
+     --predicate \
+     dhi.io/<image>:<tag>
+   ```
+
+   > [!NOTE]
+   >
+   > If the image exists locally on your device, you must prefix the image name with `registry://`. For example, use
+   > `registry://dhi.io/python` instead of `dhi.io/python`.
+
+   For example:
+
+   ```console
+   $ docker scout attest get \
+     --predicate-type https://scout.docker.com/tests/v0.1 \
+     --predicate \
+     dhi.io/python:3.13
+   ```
+
+   This contains a list of tests and their results.
+
+   Example output:
+
+   ```console
+   v SBOM obtained from attestation, 101 packages found
+   v Provenance obtained from attestation
+   {
+     "reportFormat": "CTRF",
+     "results": {
+       "summary": {
+         "failed": 0,
+         "passed": 1,
+         "skipped": 0,
+         "start": 1749216533,
+         "stop": 1749216574,
+         "tests": 1
+       },
+       "tests": [
+         {
+     ...
+   ```
+
+2. Verify the test attestation signature. To ensure the attestation is authentic
+   and signed by Docker, run:
+
+   ```console
+   $ docker scout attest get \
+     --predicate-type https://scout.docker.com/tests/v0.1 \
+     --verify \
+     dhi.io/<image>:<tag> --platform <platform>
+   ```
+
+   Example output:
+
+   ```console
+   v SBOM obtained from attestation, 101 packages found
+   v Provenance obtained from attestation
+   v cosign verify registry.scout.docker.com/docker/dhi-python@sha256:70c8299c4d3cb4d5432734773c45ae58d8acc2f2f07803435c65515f662136d5 \
+       --key https://registry.scout.docker.com/keyring/dhi/latest.pub --experimental-oci11
+
+   Verification for registry.scout.docker.com/docker/dhi-python@sha256:70c8299c4d3cb4d5432734773c45ae58d8acc2f2f07803435c65515f662136d5 --
+   The following checks were performed on each of these signatures:
+     - The cosign claims were validated
+     - Existence of the claims in the transparency log was verified offline
+     - The signatures were verified against the specified public key
+
+   i Signature payload
+   ...
+   ```
+
+If the attestation is valid, Docker Scout will confirm the signature and show
+the matching `cosign verify` command.
+
+To view other attestations, such as SBOMs or vulnerability reports, see [Verify
+an image](../how-to/verify.md).
diff --git a/content/manuals/dhi/explore/what.md b/content/manuals/dhi/explore/what.md new file mode 100644 index 00000000000..b834f88113a --- /dev/null +++ b/content/manuals/dhi/explore/what.md @@ -0,0 +1,97 @@ +--- +title: What are hardened images and why use them? +linktitle: Hardened images +description: Learn what a hardened image is, why it matters, and how Docker Hardened Images offer stronger security, compliance, and operational efficiency. +keywords: hardened container image, docker hardened images, distroless container, slsa build level 3, signed sbom, vulnerability scan, compliance-ready container +weight: 5 +aliases: + - /dhi/about/what/ +--- + +In today’s diverse software environments, container images are often designed +for flexibility and broad compatibility. While that makes them ideal for many +use cases, it can also result in images that include more components than needed +for specific workloads. Docker Hardened Images take a minimal-by-design approach +to help reduce image size, limit the attack surface, and streamline security and +compliance workflows. + +Hardened images solve this by minimizing what's in the container image. Less +software means fewer vulnerabilities, faster deployments, and fewer red +dashboards to chase down every week. + +For platform engineers and security teams, hardened images offer a way out of +the CVE triage cycle, letting you focus on delivering secure, compliant +infrastructure without constant firefighting. + +## What is a hardened image? + +A hardened image is a container image that has been deliberately minimized and +secured to reduce vulnerabilities and meet stringent security and compliance +requirements. Unlike standard images, which may include non-essential components +that increase risk, hardened images are streamlined to include only what’s +needed to run your application securely. + +## Benefits of hardened images + +- Reduced attack surface: By removing non-essential components, hardened images + limit potential entry points for attackers. +- Improved security posture: Regular updates and vulnerability scans help ensure + hardened images remain secure over time. +- Compliance facilitation: Inclusion of signed metadata like SBOMs supports + meeting regulatory and organizational compliance standards. +- Operational efficiency: Smaller image sizes lead to faster pulls, lower runtime overhead, and reduced cloud resource costs. + +## What is a Docker Hardened Image? + +Docker Hardened Images (DHIs) take hardened images even further by combining +minimal, secure design with enterprise-grade support and tooling. Built with +security at the core, these images are continuously maintained, tested, and +validated to meet today’s toughest software supply chain and compliance +standards. + +Docker Hardened Images are secure by default, minimal by design, and maintained +so you don’t have to. + +## How Docker Hardened Images differ from generic hardened images + +- SLSA-compliant builds: Docker Hardened Images are built to meet [SLSA Build + Level 3](../core-concepts/slsa.md), ensuring a tamper-resistant, verifiable, + and auditable build process that protects against supply chain threats. + +- Distroless approach: Unlike traditional base images that bundle an entire OS + with shells, package managers, and debugging tools, [distroless + images](../core-concepts/distroless.md) retain only the minimal OS components + required to run your application. 
By excluding unnecessary tooling and + libraries, they reduce the attack surface by up to 95% and can improve + performance and image size. + +- Continuous maintenance: All DHIs are continuously monitored and updated to + maintain near-zero known exploitable [CVEs](../core-concepts/cves.md), helping + your teams avoid patch fatigue and surprise alerts. + +- Compliance-ready: Each image includes cryptographically signed metadata: + - [SBOMs](../core-concepts/sbom.md) that show what's in the image + - [VEX documents](../core-concepts/vex.md) to identify which vulnerabilities + are actually exploitable + - [Build provenance](../core-concepts/provenance.md) that proves how and where + the image was built + +- Compatibility-focused design: Docker Hardened Images provide a minimal runtime + environment while maintaining compatibility with common Linux distributions. + They remove non-essential components like shells and package managers to + enhance security, yet retain a small base layer built on familiar distribution + standards. Images are typically available with musl libc (Alpine-based) and + glibc (Debian-based), supporting a broad range of application compatibility + needs. + +## Why use Docker Hardened Images? + +Docker Hardened Images (DHIs) are secure by default, minimal by design, and +maintained so you don't have to. They offer: + + +- Images built for peace of mind: Ultra-minimal and distroless, DHIs eliminate up to 95% of the traditional container attack surface. +- No more patch panic: With continuous CVE scanning and [SLA-backed remediation](https://docs.docker.com/go/dhi-sla/), Docker helps you stay ahead of threats. +- Audit-ready images: All DHIs include signed SBOMs, VEX, and provenance that support security and compliance workflows. +- Images that work with your stack: Available in Alpine and Debian flavors, DHIs drop into your existing Dockerfiles and pipelines. +- Images backed by enterprise support: Get peace of mind with Docker's support and rapid response to critical vulnerabilities. diff --git a/content/manuals/dhi/features.md b/content/manuals/dhi/features.md new file mode 100644 index 00000000000..379eb0a5fe6 --- /dev/null +++ b/content/manuals/dhi/features.md @@ -0,0 +1,156 @@ +--- +title: Docker Hardened Images features +linktitle: Features +description: Docker Hardened Images provide total transparency, minimal attack surface, and enterprise-grade security for every application—free and open source. +weight: 5 +aliases: + - /dhi/features/secure/ + - /dhi/features/integration/ + - /dhi/features/support/ + - /dhi/features/patching/ + - /dhi/features/flexible/ + - /dhi/features/helm/ +--- + +Docker Hardened Images (DHI) are minimal, secure, and production-ready container +base and application images maintained by Docker. Designed to reduce +vulnerabilities and simplify compliance, DHI integrates easily into your +existing Docker-based workflows with little to no retooling required. + +DHI provides security for everyone: + +- [DHI Community](#dhi-community-features) provides core security features available to + everyone with no licensing restrictions under Apache 2.0. +- [DHI Select and DHI Enterprise](#dhi-select-and-enterprise-features) add SLA-backed + security updates, FIPS/STIG compliance variants, and customization + capabilities, with DHI Enterprise offering unlimited customization, full + catalog access, and optional Extended Lifecycle Support (ELS) for post-EOL + coverage. 
+ +## DHI Community features + +DHI's core features are open and free to use, share, and build on with no +licensing surprises, backed by an Apache 2.0 license. + +### Security by default + +- Near-zero CVEs: Continuously scanned and patched to maintain minimal known + exploitable vulnerabilities, with no SLA-backed time commitments for DHI Community users +- Minimal attack surface: Distroless variants reduce attack surface by up to 95% by removing unnecessary components +- Non-root execution: Run as non-root by default, following the principle of least privilege +- Transparent vulnerability reporting: Every CVE is visible and assessed using public data—no suppressed feeds or proprietary scoring + +### Hardened system packages + +Docker Hardened Images maintain supply chain integrity throughout the entire +image stack with hardened system packages: + +- Source-built packages: For supported distributions, system packages are built + from source code by Docker +- Cryptographic signatures: Every package is cryptographically signed and verified +- Supply chain security: Eliminates risk from potentially compromised public packages + +Hardened system packages are included in supported distributions of DHI images. +Community users can also configure their package manager to use Docker's public +hardened package repository in their own images for the same packages included +in the base images. See [Use hardened system packages](./how-to/hardened-packages.md) +for details. + +### Total transparency + +Every image includes complete, verifiable security metadata: + +- SLSA Build Level 3 provenance: Verifiable, tamper-resistant builds that meet supply chain security standards +- Signed SBOMs: Complete Software Bill of Materials for every component +- VEX statements: Vulnerability Exploitability eXchange documents provide context about known CVEs +- Cryptographic signatures: All images and metadata are signed for authenticity + +### Built for developers + +- Familiar foundations: Built on Alpine and Debian, requiring minimal changes to adopt +- glibc and musl support: Available in both variants for broad application compatibility +- Development and runtime variants: Use dev images for building, minimal runtime images for production +- Drop-in compatibility: Works seamlessly with existing Docker workflows, CI/CD pipelines, and tools + +### Continuous maintenance + +- Automatic patching: Images are rebuilt and updated when upstream security + patches become available, with no SLA-backed time commitments for DHI + Community users +- Scanner integration: Direct integration with scanners and other security platforms + +### Kubernetes and Helm chart support + +Docker Hardened Image (DHI) charts are Docker-provided Helm charts built from +upstream sources, designed for compatibility with Docker Hardened Images. These +charts are available as OCI artifacts within the DHI catalog on Docker Hub. DHI +charts are robustly tested after building to ensure they work out-of-the-box +with Docker Hardened Images. This removes friction in migration and reduces +developer workload in implementing the charts, ensuring seamless compatibility. + +Like the hardened images, DHI charts incorporate multiple layers of security +metadata to ensure transparency and trust: + +- SLSA Level 3 compliance: Each chart is built with Docker's SLSA Build Level 3 + system, including a detailed build provenance, and meeting the standards set + by the Supply-chain Levels for Software Artifacts (SLSA) framework. 
+- Software Bill of Materials (SBOMs): Comprehensive SBOMs are provided, + detailing all components referenced within the chart to facilitate + vulnerability management and compliance audits. +- Cryptographic signing: All associated metadata is cryptographically signed by + Docker, ensuring integrity and authenticity. +- Hardened configuration: Charts automatically reference Docker hardened images, + ensuring security in deployments. + +## DHI Select and Enterprise features + +For organizations with strict security requirements, regulatory demands, or +operational needs, DHI Select and Enterprise deliver additional capabilities. + +DHI Select offers customizations, compliance variants, and SLA-backed updates +for teams and organizations with production workloads. DHI Enterprise includes +everything in Select with unlimited customizations, plus an optional Extended +Lifecycle Support add-on and full catalog access for large enterprises with +advanced security needs. + +For a detailed comparison, see [Docker Hardened Images subscription +comparison](https://www.docker.com/products/hardened-images/#compare). + +### SLA-backed security {tier="DHI Select or DHI Enterprise"} + +- CVE remediation SLA: 7-day SLA for critical and high severity vulnerabilities +- Continuous patching: Regular security updates backed by SLA commitments +- Enterprise support: Access to Docker's support team for mission-critical applications + +For complete details, see the [Support Service Level Agreement](https://docs.docker.com/go/dhi-sla/). + +### Compliance variants {tier="DHI Select or DHI Enterprise"} + +- FIPS-enabled images: For regulated industries and government systems +- STIG-ready images: Meet DoD Security Technical Implementation Guide requirements + +### Customization and control {tier="DHI Select or DHI Enterprise"} + +- Build custom images: Add your own packages, tools, certificates, and configurations + - DHI Select: Up to 5 customizations + - DHI Enterprise: Unlimited customizations +- Hardened packages: Access to additional compliance-specific packages (such as + FIPS variants) and Docker-patched packages not available in the public repository + - DHI Select: Add these packages through the customization UI when customizing hardened images + - DHI Enterprise: Add these packages through the customization UI, or configure + your package manager to use the enterprise package repository in your own images +- Secure build infrastructure: Customizations built on Docker's trusted infrastructure +- Full chain of trust: Customized images maintain provenance and cryptographic signing +- Automatic updates: Custom images are automatically rebuilt when base images are patched + +### Extended Lifecycle Support {tier="DHI Enterprise add-on"} + +- Post-EOL security coverage: Continue receiving patches for years after upstream support ends +- Continuous compliance: Updated SBOMs, provenance, and signing for audit requirements +- Production continuity: Keep production running securely without forced migrations + +## Learn more + +- [Explore how DHI images are built and more](/dhi/explore/) +- [Get started using DHIs](/dhi/get-started/) +- [Contact Docker for DHI Enterprise](https://www.docker.com/pricing/contact-sales/) diff --git a/content/manuals/dhi/get-started.md b/content/manuals/dhi/get-started.md new file mode 100644 index 00000000000..9ad8d9981bc --- /dev/null +++ b/content/manuals/dhi/get-started.md @@ -0,0 +1,153 @@ +--- +linktitle: Quickstart +title: Docker Hardened Images quickstart +description: Follow a 
quickstart guide to explore and run a Docker Hardened Image. +weight: 2 +keywords: docker hardened images quickstart, run secure image +--- + +This guide shows you how to go from zero to running a Docker Hardened Image +(DHI) using a real example. At the end, you'll compare the DHI to a standard +Docker image to better understand the differences. While the steps use a +specific image as an example, they can be applied to any DHI. + +This quickstart uses DHI Community images from `dhi.io`. You sign in with your +Docker account, pull and run an image, and compare it with a Docker Official Image. + +> [!NOTE] +> +> If you have a DHI Select or Enterprise subscription, see [Get started with DHI +> Select and Enterprise](./how-to/select-enterprise.md) instead. Select and +> Enterprise use mirrored repositories in your organization namespace on Docker +> Hub to enable customization, SLA-backed security updates, and access to +> compliance variants. + +## Step 1: Find an image to use + +1. Go to the Hardened Images catalog in [Docker + Hub](https://hub.docker.com/hardened-images/catalog). +2. Use the search bar or filters to find an image (for example, `python`, + `node`, or `golang`). For this example, search for `python`. +3. Select the Python repository to view its details. + +Continue to the next step to pull and run the image. To dive deeper into searching +and evaluating images, see [Search and evaluate Docker Hardened Images](./how-to/explore.md). + +## Step 2: Pull and run the image + +You can pull and run a DHI like any other Docker image. Note that Docker Hardened +Images are designed to be minimal and secure, so they may not include all the +tools or libraries you expect in a typical image. You can view the typical +differences in [Considerations when adopting +DHIs](./how-to/use.md#considerations-when-adopting-dhis). + +The following example demonstrates that you can run the Python image and execute +a simple Python command just like you would with any other Docker image: + +1. Open a terminal and sign in to the Docker Hardened Images registry using your + Docker account credentials. + + ```console + $ docker login dhi.io + ``` + + > [!TIP] + > + > If you don't have a Docker account, [create a free + > account](https://hub.docker.com/signup) to get started. + +2. Pull the image: + + ```console + $ docker pull dhi.io/python:3.13 + ``` + +3. Run the image to confirm everything works: + + ```console + $ docker run --rm dhi.io/python:3.13 python -c "print('Hello from DHI')" + ``` + + This starts a container from the `python:3.13` image and runs a simple + Python script that prints `Hello from DHI`. + +To dive deeper into using images, see: + +- [Use a Docker Hardened Image](./how-to/use.md) for general usage +- [Use a Helm chart](./how-to/helm.md) for deploying with Helm + +## Step 3: Compare with other images + +You can quickly compare DHIs with other images to see the security +improvements and differences. This comparison helps you understand the value of +using hardened images. + +Run the following command to compare the Docker Hardened Image for Python with +the non-hardened Docker Official Image for Python from Docker Hub. Look for the +`## Overview` section in the output for a summary comparison. 
+ +```console +$ docker scout compare dhi.io/python:3.13 \ + --to python:3.13 \ + --platform linux/amd64 \ + --ignore-unchanged +``` + +The `## Overview` section of the output looks similar to the following: + +```plaintext + ## Overview + + │ Analyzed Image │ Comparison Image + ────────────────────┼───────────────────────────────────────────────────────┼─────────────────────────────────────────────── + Target │ dhi.io/python:3.13 │ python:3.13 + digest │ c215e9da9f84 │ 7f48e892134c + tag │ 3.13 │ 3.13 + platform │ linux/amd64 │ linux/amd64 + provenance │ https://github.com/docker-hardened-images/definitions │ https://github.com/docker-library/python.git + │ 77a629b3d0db035700206c2a4e7ed904e5902ea8 │ 3f2d7e4c339ab883455b81a873519f1d0f2cd80a + vulnerabilities │ 0C 0H 0M 0L │ 0C 1H 5M 141L 2? + │ -1 -5 -141 -2 │ + size │ 35 MB (-377 MB) │ 412 MB + packages │ 80 (-530) │ 610 + │ │ +``` + +> [!NOTE] +> +> This is example output. Your results may vary depending on newly discovered +> CVEs and image updates. + +Docker maintains near-zero CVEs in Docker Hardened Images. For DHI Select and +Enterprise subscriptions, when new CVEs are discovered, the CVEs are remediated +within the industry-leading SLA time frame. Learn more about the [SLA-backed +security features](./features.md#sla-backed-security). + +This comparison shows that the Docker Hardened Image: + +- Removes vulnerabilities: 1 high, 5 medium, 141 low, and 2 unspecified severity CVEs removed +- Reduces size: From 412 MB down to 35 MB (91% reduction) +- Minimizes packages: From 610 packages down to 80 (87% reduction) + +To dive deeper into comparing images see [Search and evaluate Docker Hardened Images](./how-to/explore.md#compare-and-evaluate-images). + +## What's next + +You've pulled and run your first Docker Hardened Image. Here are a few ways to keep going: + +- [Migrate existing applications to DHIs](./migration/migrate-with-ai.md): Use + Gordon to update your Dockerfiles to use Docker Hardened Images as the base. + +- [Start a trial](https://hub.docker.com/hardened-images/start-free-trial) to + explore the benefits of a DHI subscription, such as access to FIPS and STIG + variants, customized images, and SLA-backed updates. + +- [Get started with DHI Select and Enterprise](./how-to/select-enterprise.md): + After subscribing to a DHI subscription or starting a trial, learn how to + mirror repositories, customize images, and access compliance variants. + +- [Verify DHIs](./how-to/verify.md): Use tools like [Docker Scout](/scout/) or + Cosign to inspect and verify signed attestations, like SBOMs and provenance. + +- [Scan DHIs](./how-to/scan.md): Analyze the image with Docker Scout or other + scanners to identify known CVEs. diff --git a/content/manuals/dhi/how-to/_index.md b/content/manuals/dhi/how-to/_index.md new file mode 100644 index 00000000000..9a5ffaeb94e --- /dev/null +++ b/content/manuals/dhi/how-to/_index.md @@ -0,0 +1,98 @@ +--- +title: How-tos +description: Step-by-step guidance for working with Docker Hardened Images, from discovery to governance. +weight: 20 +aliases: + - /dhi/how-to/manage/ +params: + grid_discover: + - title: Search and evaluate Docker Hardened Images + description: Learn how to find and evaluate image repositories, variants, metadata, and attestations in the DHI catalog on Docker Hub. 
+ icon: travel_explore + link: /dhi/how-to/explore/ + grid_adopt: + - title: Get started with DHI Select and Enterprise + description: Learn how to mirror repositories, customize images, and access compliance variants with DHI Select and Enterprise subscriptions. + icon: rocket_launch + link: /dhi/how-to/select-enterprise/ + - title: Use the DHI CLI + description: Use the dhictl command-line tool to manage and interact with Docker Hardened Images. + icon: terminal + link: /dhi/how-to/cli/ + - title: Mirror a Docker Hardened Image repository + description: Learn how to mirror an image into your organization's namespace and optionally push it to another private registry. + icon: compare_arrows + link: /dhi/how-to/mirror/ + - title: Customize a Docker Hardened Image or chart + description: Learn how to customize Docker Hardened Images and charts. + icon: settings + link: /dhi/how-to/customize/ + - title: Use hardened system packages + description: Learn how to use Docker's hardened system packages in your images. + icon: inventory_2 + link: /dhi/how-to/hardened-packages/ + - title: Use a Docker Hardened Image + description: Learn how to pull, run, and reference Docker Hardened Images in Dockerfiles, CI pipelines, and standard development workflows. + icon: play_arrow + link: /dhi/how-to/use/ + - title: Use a Docker Hardened Image chart + description: Learn how to use a Docker Hardened Image chart. + icon: leaderboard + link: /dhi/how-to/helm/ + grid_verify: + - title: Verify a Docker Hardened Image or chart + description: Use Docker Scout or cosign to verify signed attestations like SBOMs, provenance, and vulnerability data for Docker Hardened Images and charts. + icon: check_circle + link: /dhi/how-to/verify/ + - title: Scan Docker Hardened Images + description: Learn how to scan Docker Hardened Images for known vulnerabilities using Docker Scout, Grype, or Trivy. + icon: bug_report + link: /dhi/how-to/scan/ + grid_govern: + - title: Enforce Docker Hardened Image usage with policies + description: Learn how to use image policies with Docker Scout for Docker Hardened Images. + icon: policy + link: /dhi/how-to/policies/ +--- + +This section provides practical, task-based guidance for working with Docker +Hardened Images (DHIs). Whether you're evaluating DHIs for the first time or +integrating them into a production CI/CD pipeline, these topics cover the key +tasks across the adoption journey: discover, adopt, verify, and govern. + +The topics are organized around the typical lifecycle of working with DHIs, but +you can use them as needed based on your specific workflow. + +Explore the topics below that match your current needs. + +## Discover + +Explore available images and metadata in the DHI catalog. + +{{< grid + items="grid_discover" +>}} + +## Adopt + +Mirror trusted images, customize as needed, and integrate into your workflows. + +{{< grid + items="grid_adopt" +>}} + +## Verify + +Check signatures, SBOMs, and provenance, and scan for vulnerabilities. + +{{< grid + items="grid_verify" +>}} + +## Govern + +Enforce policies to maintain security and compliance. 
+ +{{< grid + items="grid_govern" +>}} diff --git a/content/manuals/dhi/how-to/build.md b/content/manuals/dhi/how-to/build.md new file mode 100644 index 00000000000..56cb45aebac --- /dev/null +++ b/content/manuals/dhi/how-to/build.md @@ -0,0 +1,842 @@ +--- +title: Create and build a Docker Hardened Image +linktitle: Create and build an image +description: Learn how to write a DHI definition file and build your own Docker Hardened Image from the declarative YAML schema. +keywords: hardened images, DHI, build, yaml, security, sbom, provenance, declarative, catalog, definition file +weight: 26 +--- + +Docker Hardened Images (DHI) are built from declarative YAML definition files +instead of traditional Dockerfiles. A single YAML file describes exactly what +goes into an image: packages, users, environment variables, entrypoint, and +metadata. The DHI build system produces a signed image containing only the required +packages, with a Software Bill of Materials (SBOM) and SLSA Build Level 3 +provenance. + +This page explains how to write a DHI definition file, build images locally, and +use advanced patterns such as build stages, third-party repositories, file +paths, and dev variants. + +> [!IMPORTANT] +> +> The DHI build system pulls base images and build tools from `dhi.io`, so you +> must authenticate to that registry before building a definition file. Use your +> Docker ID credentials (the same username and password you use for Docker Hub) +> when signing in. +> +> Run `docker login dhi.io` to authenticate. + +## How DHI builds differ from Dockerfiles + +A Dockerfile is a sequence of imperative instructions: `RUN`, `COPY`, `FROM`. +A DHI definition file is a declarative specification. You describe the desired +state of the image, and the build system figures out how to produce it. + +Every DHI definition starts with a syntax directive that tells BuildKit which +DHI build frontend to use. The frontend is the component that parses and +processes YAML definitions instead of the default Dockerfile parser: + +```yaml +# syntax=dhi.io/build:2-alpine3.23 +``` + +The frontend version corresponds to the base distribution: + +| Distribution | Syntax directive | +|---------------------|----------------------------------------| +| Alpine 3.22 | `# syntax=dhi.io/build:2-alpine3.22` | +| Alpine 3.23 | `# syntax=dhi.io/build:2-alpine3.23` | +| Debian 12 (Bookworm)| `# syntax=dhi.io/build:2-debian12` | +| Debian 13 (Trixie) | `# syntax=dhi.io/build:2-debian13` | + +The DHI build system reads the YAML, resolves packages from the specified +repositories, assembles the filesystem, creates user accounts, sets metadata, +and produces a signed OCI image. + +## Explore the catalog for reference + +The [DHI catalog repository](https://github.com/docker-hardened-images/catalog) +is open source under Apache 2.0 and contains every official image definition. +Studying existing definitions is the best way to learn the YAML patterns for +different image types. 
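
For example, you can read a definition straight from GitHub before cloning
anything. The following sketch fetches the `alpine-base` definition that is
also used in the build example later on this page (it assumes `curl` is
available locally):

```console
$ curl -fsSL https://raw.githubusercontent.com/docker-hardened-images/catalog/refs/heads/main/image/alpine-base/alpine-3.23/3.23.yaml
```
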
+ +The catalog follows this directory structure: + +```text +catalog/ +├── image/ +│ ├── alpine-base/ +│ │ ├── alpine-3.23/ +│ │ │ ├── 3.23.yaml # runtime variant +│ │ │ └── 3.23-dev.yaml # dev variant +│ │ ├── guides.md +│ │ ├── info.yaml +│ │ ├── logo.svg +│ │ └── overview.md +│ ├── nginx/ +│ │ ├── alpine-3.22/ +│ │ ├── alpine-3.23/ +│ │ │ ├── mainline.yaml +│ │ │ ├── mainline-dev.yaml +│ │ │ ├── stable.yaml +│ │ │ └── stable-dev.yaml +│ │ ├── debian-12/ +│ │ ├── debian-13/ +│ │ ├── bin/ +│ │ ├── guides.md +│ │ ├── info.yaml +│ │ ├── logo.svg +│ │ └── overview.md +│ └── redis/ +│ ├── debian-13/ +│ │ ├── 8.0.yaml # runtime +│ │ ├── 8.0-dev.yaml # dev +│ │ ├── 8.0-compat.yaml # compat runtime +│ │ └── 8.0-compat-dev.yaml # compat dev +│ ├── guides.md +│ ├── info.yaml +│ ├── logo.svg +│ └── overview.md +├── chart/ +└── package/ +``` + +Each image organizes its variants by distribution. Images support multiple +variant types: + +- A `runtime` variant is minimal and typically runs as a non-root user. +- A `dev` variant adds a shell, package manager, and development tools. +- A compatibility variant adds common shell utilities such as `bash`, + `coreutils`, `grep`, and `sed` for use with existing workflows. Compatibility + images use the `flavor: compat` field alongside a `runtime` or `dev` variant. +- A compatibility-dev variant combines the compatibility packages with dev + tools. + +Some images also support additional flavors such as `sfw` (software framework) +variants. Refer to the catalog for the full list of available variants for each +image. + +## Try it: build a catalog image + +Before writing your own definition, try building an existing catalog image +directly from GitHub: + +```console +$ docker buildx build \ + https://raw.githubusercontent.com/docker-hardened-images/catalog/refs/heads/main/image/alpine-base/alpine-3.23/3.23.yaml \ + --sbom=generator=dhi.io/scout-sbom-indexer:1 \ + --provenance=1 \ + --tag my-alpine-base:3.23 \ + --load +``` + +This downloads the definition file directly from GitHub and builds it locally. +After the build completes, verify the image: + +```console +$ docker images my-alpine-base +``` + +To modify an image, clone the catalog and edit the YAML files locally: + +```console +$ git clone https://github.com/docker-hardened-images/catalog.git +$ cd catalog +``` + +## YAML schema reference + +The following sections describe the fields available in a DHI definition file. + +### Required fields + +Every definition must include these top-level fields: + +| Field | Description | +|-------------|---------------------------------------------------------------------| +| `name` | Human-readable name for the image. | +| `image` | Full registry path, such as `dhi.io/my-image`. | +| `variant` | Image variant type: `runtime` or `dev`. | +| `tags` | List of image tags. | +| `platforms` | Target architectures, such as `linux/amd64` and `linux/arm64`. | +| `contents` | Package repositories and packages to install. | + +### Image metadata + +These fields add metadata to the image: + +| Field | Description | +|---------------|-------------------------------------------------------------------| +| `os-release` | Defines the `/etc/os-release` contents inside the image. | +| `annotations` | OCI image annotations such as description and license. | +| `dates` | Release date and end-of-life date. | +| `vars` | Build-time variables for templating. 
| +| `flavor` | Image flavor modifier, such as `compat` for compatibility images.| + +### Container configuration + +These fields control how the container runs: + +| Field | Description | +|---------------|-------------------------------------------------------------------| +| `accounts` | Users, groups, and the `run-as` user. | +| `environment` | Environment variables. | +| `entrypoint` | Container entrypoint command. | +| `cmd` | Default command arguments. | +| `work-dir` | Working directory inside the container. | +| `volumes` | Volume mount points. | +| `ports` | Exposed network ports. | +| `paths` | Directories, files, and symlinks to create. | + +### Advanced fields + +These fields support more complex build patterns: + +| Field | Description | +|----------------------|--------------------------------------------------------------| +| `contents.builds` | Build stages with shell pipelines. | +| `contents.keyring` | Signing keys for third-party package repositories. | +| `contents.artifacts` | Pre-built OCI artifacts to include. | +| `contents.mappings` | Package URL (purl) mappings for SBOM accuracy. | +| `contents.files` | Source files fetched from Git URLs with checksums. | + +## Create a minimal image + +Start with the simplest possible definition: an Alpine base image with a +non-root user. + +Create a directory for your project and add a file called `base.yaml`: + +```yaml +# syntax=dhi.io/build:2-alpine3.23 + +name: My Base Image +image: my-registry/my-base +variant: runtime +tags: + - "1.0.0" + - "1.0" +platforms: + - linux/amd64 + - linux/arm64 + +contents: + repositories: + - https://dl-cdn.alpinelinux.org/alpine/v3.23/main + - https://dl-cdn.alpinelinux.org/alpine/v3.23/community + packages: + - alpine-baselayout-data + - busybox + - ca-certificates-bundle + +accounts: + run-as: nonroot + users: + - name: nonroot + uid: 65532 + gid: 65532 + groups: + - name: nonroot + gid: 65532 + members: + - nonroot + +os-release: + name: Docker Hardened Images (Alpine) + id: alpine + version-id: "3.23" + pretty-name: My Hardened Image + home-url: https://docker.com/products/hardened-images/ + bug-report-url: https://docker.com/support/ + +environment: + SSL_CERT_FILE: /etc/ssl/certs/ca-certificates.crt + +annotations: + org.opencontainers.image.description: A minimal Alpine base image + +cmd: + - /bin/sh +``` + +In this definition: + +- `contents.repositories` uses full URLs to Alpine package mirrors. +- `contents.packages` lists exact Alpine package names. +- The `accounts` block creates a `nonroot` user (UID 65532) and sets it as the + default user for the container. +- The `os-release` block defines what appears in `/etc/os-release`. Always + include `bug-report-url` alongside `home-url`. +- The `annotations` block adds OCI metadata visible in registries and Docker + Scout reports. + +Build the image: + +```console +$ docker buildx build . -f base.yaml \ + --sbom=generator=dhi.io/scout-sbom-indexer:1 \ + --provenance=1 \ + --tag my-base:latest \ + --load +``` +> [!NOTE] +> +> The `tags` field in the spec file defines the image metadata (variant and +> version labels embedded in the image manifest). The `--tag` flag on the CLI +> sets the OCI image reference used to push or load the image. These serve +> different purposes - the spec file tags describe *what the image is*, while +> the CLI tag determines *where it's stored*. 
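
Once the image is loaded, a quick sanity check helps confirm the definition
behaved as expected. This is only a sketch based on the minimal definition
above (busybox provides `cat` and `id`); adjust the tag if you used a
different `--tag` value:

```console
$ docker run --rm my-base:latest cat /etc/os-release
$ docker run --rm my-base:latest id
```

The first command should show the values from the `os-release` block, such as
`pretty-name`, and the second should report UID 65532 for the `nonroot` user.
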
+ +## Use a Debian base with third-party repositories + +For applications that require Debian packages or third-party APT repositories, +use the Debian syntax directive. The following example builds a Redis image +from the official Redis APT repository. + +Create a file called `redis.yaml`: + +```yaml +# syntax=dhi.io/build:2-debian13 + +name: Redis 8.0.x +image: my-registry/my-redis +variant: runtime +tags: + - "8.0" + - "8.0.5" +platforms: + - linux/amd64 + - linux/arm64 + +contents: + repositories: + - deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb trixie main + keyring: + - https://packages.redis.io/gpg + packages: + - '!libelogind0' + - '!mawk' + - '!original-awk' + - base-files + - libpcre2-8-0 + - libssl3t64 + - libstdc++6 + - libsystemd0 + - redis=6:8.0.5-1rl1~trixie1 + - redis-server=6:8.0.5-1rl1~trixie1 + - redis-tools=6:8.0.5-1rl1~trixie1 + - tini + mappings: + redis: pkg:deb/redis/redis@6:8.0.5-1rl1~trixie1?os_name=debian&os_version=13 + redis-server: pkg:deb/redis/redis-server@6:8.0.5-1rl1~trixie1?os_name=debian&os_version=13 + redis-tools: pkg:deb/redis/redis-tools@6:8.0.5-1rl1~trixie1?os_name=debian&os_version=13 + +accounts: + run-as: nonroot + users: + - name: nonroot + uid: 65532 + gid: 65532 + groups: + - name: nonroot + gid: 65532 + members: + - nonroot + +os-release: + name: Docker Hardened Images (Debian) + id: debian + version-id: "13" + version-codename: trixie + pretty-name: Docker Hardened Images/Debian GNU/Linux 13 (trixie) + home-url: https://docker.com/products/hardened-images/ + bug-report-url: https://docker.com/support/ + +work-dir: /data + +environment: + REDIS_VERSION: 8.0.5 + +annotations: + org.opencontainers.image.description: A minimal Redis image + org.opencontainers.image.licenses: AGPL-3.0-only + +entrypoint: + - /usr/bin/tini + - -- + +cmd: + - redis-server + - /etc/redis/redis.conf + - --include + - /etc/redis/conf.d/*.conf +``` + +This example introduces several patterns: + +- **Third-party repositories**: The `repositories` field uses the Debian + `deb [signed-by=...] URL suite component` format for APT sources. +- **Keyring**: The `keyring` field downloads the GPG key used to verify packages + from the third-party repository. +- **Package exclusions**: Prefix a package name with `!` to explicitly exclude + it. This prevents unwanted dependencies from being installed. In this case, + `!libelogind0`, `!mawk`, and `!original-awk` are excluded. +- **Debian version pinning**: Use the full epoch format, + `redis-server=6:8.0.5-1rl1~trixie1`, to pin exact package versions. +- **SBOM mappings**: The `mappings` field provides Package URL (purl) metadata + so that Docker Scout can accurately identify the software in the SBOM. +- **Init process**: The `entrypoint` uses `tini` as a lightweight init process + (PID 1) to handle signal forwarding and zombie process reaping. +- **Config includes**: The `cmd` uses `--include /etc/redis/conf.d/*.conf` so + that configuration files created in the `paths` section are loaded at startup. + +## Create paths + +Use the `paths` field to create directories, files with inline content, and +symlinks inside the image. 
The following example extends the Redis definition +with the paths required for operation: + +```yaml +paths: + - type: directory + path: /var/lib/redis + uid: 65532 + gid: 65532 + mode: "0755" + - type: directory + path: /var/log/redis + uid: 65532 + gid: 65532 + mode: "0755" + - type: directory + path: /run/redis/ + uid: 65532 + gid: 65532 + mode: "0755" + - type: directory + path: /data + uid: 65532 + gid: 65532 + mode: "0755" + - type: file + path: /etc/redis/conf.d/docker.conf + content: | + daemonize no + bind 0.0.0.0 -::1 + logfile "" + uid: 0 + gid: 0 + mode: "0555" + - type: symlink + path: /usr/bin/redis-sentinel + uid: 0 + gid: 0 + source: /usr/bin/redis-check-rdb +``` + +Three path types are available: + +| Type | Required fields | Description | +|-------------|----------------------------------|--------------------------------------| +| `directory` | `path`, `uid`, `gid`, `mode` | Creates an empty directory. | +| `file` | `path`, `content`, `uid`, `gid`, `mode` | Creates a file with inline content. | +| `symlink` | `path`, `source`, `uid`, `gid` | Creates a symbolic link. | + +The `mode` field uses a string representation of the octal permission bits, +such as `"0755"` for read-write-execute by owner or `"0555"` for read-execute +by all. Note that the `file` type supports inline `content` using a YAML +multi-line string. + +## Add build stages + +For images that need to run shell commands during the build, such as +configuring files, creating symlinks, or adjusting permissions, use the +`contents.builds` field. Each build stage has its own packages, a pipeline +of named steps, and output mappings. + +The following example configures Nginx during the build to run on an +unprivileged port and disable server tokens: + +```yaml +# syntax=dhi.io/build:2-alpine3.23 + +name: Nginx mainline +image: my-registry/my-nginx +variant: runtime +tags: + - "1.29" +platforms: + - linux/amd64 + - linux/arm64 + +contents: + repositories: + - https://dl-cdn.alpinelinux.org/alpine/v3.23/main + - https://dl-cdn.alpinelinux.org/alpine/v3.23/community + - http://nginx.org/packages/mainline/alpine/v3.23/main + keyring: + - https://nginx.org/keys/nginx_signing.rsa.pub + packages: + - alpine-baselayout-data + - busybox + - musl-utils + - nginx=1.29.5-r1 + builds: + - name: nginx + contents: + repositories: + - https://dl-cdn.alpinelinux.org/alpine/v3.23/main + - https://dl-cdn.alpinelinux.org/alpine/v3.23/community + - http://nginx.org/packages/mainline/alpine/v3.23/main + keyring: + - https://nginx.org/keys/nginx_signing.rsa.pub + packages: + - alpine-baselayout-data + - bash + - musl-utils + - nginx=1.29.5-r1 + pipeline: + - name: install + runs: | + set -eux -o pipefail + + ln -sf /dev/stdout /var/log/nginx/access.log + ln -sf /dev/stderr /var/log/nginx/error.log + + sed -i "s,listen 80;,listen 8080;," /etc/nginx/conf.d/default.conf + sed -i "/user nginx;/d" /etc/nginx/nginx.conf + sed -i "s,pid /run/nginx.pid;,pid /var/run/nginx.pid;," /etc/nginx/nginx.conf + sed -i '/^http {$/a\ server_tokens off;' /etc/nginx/nginx.conf + + chown -R 65532:65532 /var/cache/nginx + chmod -R g+w /var/cache/nginx + chown -R 65532:65532 /etc/nginx + chmod -R g+w /etc/nginx + chown -R 65532:65532 /run + chown -R 65532:65532 /run/lock + chown -R 65532:65532 /var/run + chown -R 65532:65532 /var/log/nginx + outputs: + - source: / + target: / + uid: 0 + gid: 0 + diff: true + +accounts: + run-as: nginx + users: + - name: nginx + uid: 65532 + gid: 65532 + groups: + - name: nginx + gid: 65532 + members: + - nginx + - name: 
www-data + gid: 82 + +os-release: + name: Docker Hardened Images (Alpine) + id: alpine + version-id: "3.23" + pretty-name: Docker Hardened Images/Alpine Linux v3.23 + home-url: https://docker.com/products/hardened-images/ + bug-report-url: https://docker.com/support/ + +environment: + NGINX_VERSION: 1.29.5-r1 + +annotations: + org.opencontainers.image.description: A minimal Nginx image + org.opencontainers.image.licenses: BSD-2-Clause + +entrypoint: + - nginx + +cmd: + - -g + - daemon off; + +ports: + - 8080/tcp +``` + +Key patterns in this definition: + +| Element | Description | +|-------------|----------------------------------------------------------------------------| +| `contents` | Each build stage has its own `contents` section. Include packages needed only during the build, such as `bash`. | +| `pipeline` | Contains named steps that run shell commands. Always start scripts with `set -eux -o pipefail`. | +| `outputs` | Copies results from the build stage into the final image. Setting `diff: true` copies only files that changed, keeping the image minimal. | +| `accounts` | Nginx uses a dedicated `nginx` user (UID 65532) instead of `nonroot`. The `www-data` group (GID 82) is also created for web server compatibility. | +| `musl-utils` | Required in both the main and build packages for Alpine-based Nginx images. | + +## Use OCI artifacts as package sources + +Instead of installing packages from Alpine or Debian repositories, you can pull +pre-built binaries from DHI package artifacts. This is how the catalog builds +images like Python and Node.js — the runtime is compiled separately and +published as an OCI artifact, then referenced by digest in the image definition. + +Add the `artifacts` field under `contents`: + +```yaml +contents: + repositories: + - https://dl-cdn.alpinelinux.org/alpine/v3.23/main + - https://dl-cdn.alpinelinux.org/alpine/v3.23/community + packages: + - alpine-baselayout-data + - bzip2 + - ca-certificates-bundle + - expat + - gdbm + - libffi + - mpdecimal + - musl + - ncurses + - openssl + - readline + - sqlite-libs + - tzdata + - zlib + artifacts: + - name: dhi.io/pkg-python:3.13.12-alpine3.23@sha256:052b3b915055006a27c42470eed5c65d7ee92d2c3de47ecaedcc6bbd36077b95 + includes: + - opt/** + uid: 0 + gid: 0 +``` + +| Field | Description | +|------------|------------------------------------------------------------------------------| +| `name` | Full OCI reference with digest pin. Always use `@sha256:` for reproducibility. | +| `includes` | Glob patterns for files to extract from the artifact. Paths are resolved from the filesystem root; `opt/**` includes everything under the `/opt` path. | +| `excludes` | Glob patterns for files to skip. Useful for removing headers, docs, or unused binaries. | +| `uid`, `gid` | Ownership for extracted files. | + +Available DHI packages are in the +[`package/`](https://github.com/docker-hardened-images/catalog/tree/main/package) +directory of the catalog repository. + +## Create a dev variant + +A dev variant of an image adds a shell, package manager, and development tools. +This is useful for debugging and for use as a build stage in multi-stage +workflows. 
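
For example, once you have built both variants, a multi-stage Dockerfile can
do its build work in the dev image and copy only the results into the runtime
image. The tags and the `app.sh` script below are hypothetical placeholders,
so treat this as a sketch rather than a drop-in recipe:

```dockerfile
# syntax=docker/dockerfile:1

# Build stage: the dev variant has a shell and a package manager.
FROM my-base:1.0-dev AS build
WORKDIR /app
COPY app.sh .
RUN chmod +x app.sh

# Final stage: the minimal runtime variant, which runs as nonroot.
FROM my-base:1.0
COPY --from=build /app/app.sh /usr/local/bin/app.sh
ENTRYPOINT ["/usr/local/bin/app.sh"]
```
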
+ +To create a dev variant, change the `variant` field and enable root access: + +```yaml +# syntax=dhi.io/build:2-alpine3.23 + +name: Alpine 3.23 Base (dev) +image: my-registry/my-base +variant: dev +tags: + - "1.0-dev" +platforms: + - linux/amd64 + - linux/arm64 + +contents: + repositories: + - https://dl-cdn.alpinelinux.org/alpine/v3.23/main + - https://dl-cdn.alpinelinux.org/alpine/v3.23/community + packages: + - alpine-baselayout-data + - apk-tools + - busybox + - ca-certificates-bundle + +accounts: + root: true + run-as: root + users: + - name: nonroot + uid: 65532 + gid: 65532 + groups: + - name: nonroot + gid: 65532 + members: + - nonroot + +os-release: + name: Docker Hardened Images (Alpine) + id: alpine + version-id: "3.23" + pretty-name: Docker Hardened Images/Alpine Linux v3.23 + home-url: https://docker.com/products/hardened-images/ + bug-report-url: https://docker.com/support/ + +environment: + SSL_CERT_FILE: /etc/ssl/certs/ca-certificates.crt + +annotations: + org.opencontainers.image.description: A minimal Alpine base image + +cmd: + - /bin/sh +``` + +The key differences from a runtime variant: + +- `variant: dev` instead of `variant: runtime`. +- `accounts.root: true` enables the root account. +- `run-as: root` sets root as the default user. +- `apk-tools` is added to packages, giving the image a package manager. +- The `nonroot` user is still defined so that applications can switch to an + unprivileged user at runtime. + +For Debian-based dev variants, add `apt` instead of `apk-tools` and include the +`DEBIAN_FRONTEND: noninteractive` environment variable. + +## Create a compatibility variant + +A compatibility variant includes common shell utilities for use with +scripts and automation tools that expect a standard Linux userland. Compatibility +images use the `flavor` field: + +```yaml +variant: runtime +flavor: compat +``` + +A compatibility variant adds packages such as `bash`, `coreutils`, `findutils`, +`grep`, `hostname`, `openssl`, `procps`, and `sed` alongside the application +packages. A compatibility-dev variant combines both the compatibility packages +and the dev tools: + +```yaml +variant: dev +flavor: compat +``` + +Refer to the Redis compatibility images in the catalog for a complete example of +the compatibility pattern. + +## Set ports and volumes + +Use the `ports` field to declare which ports the container exposes. Always use +unprivileged ports (higher than 1024) when the container runs as a non-root +user. + +```yaml +ports: + - 8080/tcp +``` + +Use the `volumes` field to declare volume mount points: + +```yaml +volumes: + - /data +``` + +## Set annotations + +OCI annotations add machine-readable metadata to the image. Use the +`annotations` field: + +```yaml +annotations: + org.opencontainers.image.description: A minimal hardened application image + org.opencontainers.image.licenses: Apache-2.0 +``` + +These annotations appear in Docker Scout reports and container registry +interfaces. + +## Build and verify + +### Build the image + +Build a single-platform image for local testing: + +```console +$ docker buildx build . 
-f my-image.yaml \ + --sbom=generator=dhi.io/scout-sbom-indexer:1 \ + --provenance=1 \ + --tag my-image:latest \ + --load +``` + +### Inspect the SBOM + +View the generated Software Bill of Materials: + +```console +$ docker scout sbom my-image:latest +``` + +### Scan for vulnerabilities + +Check the image against known CVE databases: + +```console +$ docker scout cves my-image:latest +``` + +### Compare with a non-hardened image + +Measure the security improvement against an equivalent non-hardened image: + +```console +$ docker scout compare my-image:latest \ + --to : \ + --platform linux/amd64 +``` + +Replace `` with the Docker Official Image or +community image you're comparing against. + +### Inspect with Docker Debug + +Verify the os-release and entrypoint configuration: + +```console +$ docker debug my-image:latest +``` + +The output shows the detected distribution name from your `os-release` +configuration and runs an entrypoint lint check. + +## Push to a registry + +Tag and push the image to your container registry: + +```console +$ docker tag my-image:latest /my-image:latest +``` + +```console +$ docker push /my-image:latest +``` + +Replace `` with your Docker Hub username or organization +namespace. + +## Contribute to the catalog + +Docker Hardened Images is an open source project. You can contribute new image +definitions or improve existing ones by submitting a pull request to the +[catalog repository](https://github.com/docker-hardened-images/catalog). + +To contribute a new image: + +1. Fork the catalog repository. +2. Create a directory under `image/` following the naming convention: + `image///`. +3. Add your YAML definition files (one per variant). +4. Add an `info.yaml` with display name, description, and categories. +5. Add an `overview.md` describing the image. +6. Add a `logo.svg` for the image icon. +7. Add a `guides.md` with usage documentation. +8. Open a pull request against the `main` branch. + +For more details, read the +[contributing guide](https://github.com/docker-hardened-images/catalog/blob/main/CONTRIBUTING.md) +in the catalog repository. diff --git a/content/manuals/dhi/how-to/cli.md b/content/manuals/dhi/how-to/cli.md new file mode 100644 index 00000000000..7d1618d0d84 --- /dev/null +++ b/content/manuals/dhi/how-to/cli.md @@ -0,0 +1,224 @@ +--- +title: Use the DHI CLI +linkTitle: Use the CLI +weight: 50 +keywords: docker dhi, CLI, command line, docker hardened images +description: Learn how to install and use docker dhi, the command-line interface for managing Docker Hardened Images. +--- + +The `docker dhi` command-line interface (CLI) is a tool for managing Docker Hardened Images: +- Browse the catalog of available DHI images and their metadata +- Mirror DHI images to your Docker Hub organization +- Create and manage customizations of DHI images +- Generate authentication for enterprise package repositories +- Monitor customization builds + +## Installation + +The `docker dhi` CLI is available in [Docker Desktop](https://docs.docker.com/desktop/) version 4.65 and later. +You can also install the standalone `dhictl` binary. + +### Docker Desktop + +The `docker dhi` command is included in Docker Desktop 4.65 and later. No additional installation is required. + +### Standalone binary + +1. Download the `dhictl` binary for your platform from the + [releases](https://github.com/docker-hardened-images/dhictl/releases) page. +2. 
Move it to a directory in your `PATH`: + - `mv dhictl /usr/local/bin/` on _Linux_ and _macOS_ + - Move `dhictl.exe` to a directory in your `PATH` on _Windows_ + +## Usage + +Every command has built-in help accessible with the `--help` flag: + +```bash +docker dhi --help +docker dhi catalog list --help +``` + +### Browse the DHI catalog + +List all available DHI images: + +```bash +docker dhi catalog list +``` + +Filter by type, name, or compliance: + +```bash +docker dhi catalog list --type image +docker dhi catalog list --filter golang +docker dhi catalog list --fips +docker dhi catalog list --stig +``` + +Get details of a specific image, including available tags and CVE counts: + +```bash +docker dhi catalog get +``` + +### Mirror DHI images + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +Start mirroring one or more DHI images to your Docker Hub organization: + +```bash +docker dhi mirror start --org my-org \ + -r dhi/golang,my-org/dhi-golang \ + -r dhi/nginx,my-org/dhi-nginx \ + -r dhi/prometheus-chart,my-org/dhi-prometheus-chart +``` + +Mirror with dependencies: + +```bash +docker dhi mirror start --org my-org -r dhi/golang,my-org/dhi-golang --dependencies +``` + +List mirrored images in your organization: + +```bash +docker dhi mirror list --org my-org +``` + +Filter mirrored images by name or type: + +```bash +docker dhi mirror list --org my-org --filter python +docker dhi mirror list --org my-org --type image +docker dhi mirror list --org my-org --type helm-chart +``` + +Stop mirroring one or more images: + +```bash +docker dhi mirror stop dhi-golang --org my-org +docker dhi mirror stop dhi-python dhi-golang --org my-org +``` + +Stop mirroring and delete the repositories: + +```bash +docker dhi mirror stop dhi-golang --org my-org --delete +docker dhi mirror stop dhi-golang --org my-org --delete --force +``` + +### Customize DHI images + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +The CLI can be used to create and manage DHI image customizations. For detailed +instructions on creating customizations using the GUI, see [Customize a Docker +Hardened Image](./customize.md). + +The following is a quick reference for CLI commands. For complete details on all +options and flags, see the +[CLI reference](/reference/cli/docker/dhi/). 
+ +```bash +# Prepare a customization scaffold +docker dhi customization prepare golang 1.25 \ + --org my-org \ + --destination my-org/dhi-golang \ + --name "golang with git" \ + --output my-customization.yaml + +# Create a customization +docker dhi customization create my-customization.yaml --org my-org + +# List customizations +docker dhi customization list --org my-org + +# Filter customizations by name, repository, or source +docker dhi customization list --org my-org --filter git +docker dhi customization list --org my-org --repo dhi-golang +docker dhi customization list --org my-org --source golang + +# Get a customization +docker dhi customization get my-org/dhi-golang "golang with git" --org my-org --output my-customization.yaml + +# Update a customization +# The YAML file must include the 'id' field to identify the customization to update +docker dhi customization edit my-customization.yaml --org my-org + +# Delete a customization +docker dhi customization delete my-org/dhi-golang "golang with git" --org my-org + +# Delete without confirmation prompt +docker dhi customization delete my-org/dhi-golang "golang with git" --org my-org --yes +``` + +### Enterprise package authentication + +{{< summary-bar feature_name="Docker Hardened Images Enterprise" >}} + +Generate authentication credentials for accessing the enterprise hardened +package repository. This is used when configuring your package manager to +install compliance-specific packages in your own images. For detailed +instructions, see [Enterprise +repository](./hardened-packages.md#enterprise-repository). + +```bash +docker dhi auth apk +``` + +### Monitor customization builds + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +List builds for a customization: + +```bash +docker dhi customization build list my-org/dhi-golang "golang with git" --org my-org +docker dhi customization build list my-org/dhi-golang "golang with git" --org my-org --json +``` + +Get details of a specific build: + +```bash +docker dhi customization build get my-org/dhi-golang "golang with git" --org my-org +docker dhi customization build get my-org/dhi-golang "golang with git" --org my-org --json +``` + +View build logs: + +```bash +docker dhi customization build logs my-org/dhi-golang "golang with git" --org my-org +docker dhi customization build logs my-org/dhi-golang "golang with git" --org my-org --json +``` + +### JSON output + +Most list and get commands support a `--json` flag for machine-readable output: + +```bash +docker dhi catalog list --json +docker dhi catalog get golang --json +docker dhi mirror list --org my-org --json +docker dhi mirror start --org my-org -r golang --json +docker dhi customization list --org my-org --json +docker dhi customization build list my-org/dhi-golang "golang with git" --org my-org --json +``` + +## Configuration + +The `docker dhi` CLI can be configured with a YAML file located at: +- `$HOME/.config/dhictl/config.yaml` on _Linux_ and _macOS_ +- `%USERPROFILE%\.config\dhictl\config.yaml` on _Windows_ + +If `$XDG_CONFIG_HOME` is set, the configuration file is located at `$XDG_CONFIG_HOME/dhictl/config.yaml` (see the [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir/spec/latest/)). 
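
A minimal configuration file might look like the following sketch, assuming
flat top-level keys for the two options listed below (the values are
placeholders):

```yaml
# $HOME/.config/dhictl/config.yaml
org: my-org
api_token: <your-docker-token>
```
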
+ +Available configuration options: + +| Option | Environment Variable | Description | +|-------------|----------------------|---------------------------------------------------------------------------------------------------------------------------| +| `org` | `DHI_ORG` | Default Docker Hub organization for mirror and customization commands. | +| `api_token` | `DHI_API_TOKEN` | Docker token for authentication. You can generate a token in your [Docker Hub account settings](https://hub.docker.com/). | + +Environment variables take precedence over configuration file values. diff --git a/content/manuals/dhi/how-to/customize.md b/content/manuals/dhi/how-to/customize.md new file mode 100644 index 00000000000..79795d15f60 --- /dev/null +++ b/content/manuals/dhi/how-to/customize.md @@ -0,0 +1,437 @@ +--- +title: Customize a Docker Hardened Image or chart +linkTitle: Customize an image or chart +weight: 25 +keywords: hardened images, DHI, customize, certificate, artifact, helm chart, terraform, infrastructure as code +description: Learn how to customize Docker Hardened Images (DHI) and charts. +--- + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +When you have a DHI Select or DHI Enterprise subscription, you can customize Docker +Hardened Images (DHI) and charts to suit your specific needs using the Docker +Hub web interface. For images, this lets you select a base image, add packages, +add OCI artifacts (such as custom certificates or additional tools), and +configure settings. For charts, this lets you customize the image references. + +Your customizations stay secure automatically. When the base Docker Hardened +Image or chart receives a security patch or your OCI artifacts are updated, +Docker automatically rebuilds your customizations in the background. This +ensures continuous compliance and protection by default, with no manual work +required. The rebuilt artifacts are signed and attested to the same SLSA Build +Level 3 standard as the base images and charts, ensuring a secure and verifiable +supply chain. + +## Customize a Docker Hardened Image + +To add a customized Docker Hardened Image to your organization, an organization +owner must first [mirror](./mirror.md) the DHI repository to your organization +on Docker Hub. Once the repository is mirrored, any user with access to the +mirrored DHI repository can create a customized image. + +You can create customizations using either the DHI CLI or the Docker Hub web interface. + +{{< tabs >}} +{{< tab name="Docker Hub" >}} + +1. Sign in to [Docker Hub](https://hub.docker.com). +1. Select **My Hub**. +1. In the namespace drop-down, select your organization that has a mirrored DHI + repository. +1. Select **Hardened Images** > **Manage** > **Mirrored Images**. +1. For the mirrored DHI repository you want to customize, select the menu icon in the far right column. +1. Select **Customize**. + + At this point, the on-screen instructions will guide you through the + customization process. You can continue with the following steps for more + details. + +1. Select one or more images or Helm charts and versions you want to customize. + + When selecting multiple images and versions, all selections must share the + same distribution and distribution version. For example, you can select + `dhi-node:22_alpine3.23` and `dhi-python:3.13_alpine3.23` together (both + Alpine 3.23), but you cannot mix `dhi-node:22_debian` with Alpine images, or + mix different Alpine versions like `alpine3.23` and `alpine3.22`. 
+ + Alternatively, you can select multiple Helm chart versions to apply the same + customization to all of them. You cannot mix images and Helm charts in the + same customization. + +1. Select **Next**. +1. Optional. Add packages. + + 1. In the packages drop-down, select the packages you want to add to the + image. + + The packages available in the drop-down are OS system packages for the + selected image variant. For version 3.23 Alpine-based images, these are + hardened packages that have been built from source by Docker with + cryptographic signatures and full supply chain security. For version 3.22 + Alpine-based images and Debian-based images, these are standard system + packages. + + 1. In the **OCI artifacts** drop-down, first, select the repository that + contains the OCI artifact image. Then, select the tag you want to use from + that repository. Finally, specify the specific paths you want to include + from the OCI artifact image. + + The OCI artifacts are images that you have previously + built and pushed to a repository in the same namespace as the mirrored + DHI. For example, you can add a custom root CA certificate or another + image that contains a tool you need, like adding Python to a Node.js + image. For more details on how to create an OCI artifact image, see + [Create an OCI artifact image](#create-an-oci-artifact-image-for-image-customization). + + You can add multiple OCI artifact images to a single customization. When + you add more than one, they're applied in the order you add them in the + **OCI artifacts** drop-down. If multiple images contain directories or + files with the same path, images added later overwrite files from images + added earlier. To manage this, you must select paths to include and + optionally exclude from each OCI artifact image. This allows you to + control which files are included in the final customized image. + + By default, no files are included from the OCI artifact image. You must + explicitly include the paths you want. After including a path, you can + then explicitly exclude files or directories underneath it. + + > [!NOTE] + > + > When files necessary for runtime are overwritten by OCI artifacts, the + > image build still succeeds, but you may have issues when running the + > image. + + 1. In the **Scripts** section, you can add, edit, or remove scripts. + + Scripts let you add files to the container image that you can access at runtime. They are not executed during + the build process. This is useful for services that require pre-start initialization, such as setup scripts or + file writes to directories like `/var/lock` or `/out`. + + You must specify the following: + + - The path where the script will be placed + - The script content + - The UID and GID ownership of the script + - The octal file permissions of the script + +1. Select **Next: Configure** to configure the following image settings: + + > [!NOTE] + > + > When customizing multiple images at once, many of these configuration + > options are limited by default and may not be available. + + 1. Specify the [environment variables](/reference/dockerfile/#env) and their + values that the image will contain. + 1. Add [labels](/reference/dockerfile/#label) to the image. + 1. Add [annotations](/build/metadata/annotations/) to the image. + 1. Specify the users to add to the image. When you add a user, a home + directory is automatically created for that user with 0755 permissions. + 1. Specify the user groups to add to the image. + 1. 
Select which [user](/reference/dockerfile/#user) to run the images as. + 1. Add [`ENTRYPOINT`](/reference/dockerfile/#entrypoint) arguments to the + image. These arguments are appended to the base image's entrypoint. + 1. Add [`CMD`](/reference/dockerfile/#cmd) arguments to the image. These + arguments are appended to the base image's command. + 1. Override the default (`/`) [working + directory](/reference/dockerfile/#workdir) for the image. + 1. Specify a suffix for the customization name that is appended to the + customized image's tag. For example, if you specify `custom` when + customizing the `dhi-python:3.13` image, the customized image will be + tagged as `dhi-python:3.13_custom`. + 1. Select the compression format for the image layers. You can choose between + **ZSTD** (default) or **GZIP** compression. **ZSTD** typically provides + faster image pulls and better compression ratios, but may have + compatibility issues with older software. If you need compatibility with + older Docker versions, use **GZIP**. + 1. Select the platforms you want to build the image for. You must select at + least one platform. + +1. Select **Next: Review customization**. + +1. Select **Create Customization**. + + A summary of the customization appears. It may take some time for the image + to build. Once built, it will appear in the **Tags** tab of the repository, + and your team members can pull it like any other image. + +{{< /tab >}} +{{< tab name="CLI" >}} + +Authenticate with `docker login` using your Docker credentials, a [personal +access token (PAT)](../../security/access-tokens.md) with **Read & Write** +permissions, or an [organization access token +(OAT)](../../enterprise/security/access-tokens.md). When using an OAT, the +available operations depend on the token's permission scope: + +- To list or get customizations, or to view build logs, the OAT must have read + (pull) access to the destination repository. Results are scoped to + repositories the OAT can access. +- To create, update, or delete a customization, the OAT must have push access to + the destination repository. Bulk operations require push access to every + referenced destination repository. 
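
For example, a non-interactive sign-in with a PAT stored in an environment
variable might look like the following sketch (`DOCKER_PAT` and `my-username`
are placeholders):

```console
$ echo "$DOCKER_PAT" | docker login --username my-username --password-stdin
```
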
+ +Use the [`docker dhi customization`](/reference/cli/docker/dhi/customization/) command: + +```console +# Prepare a customization scaffold +$ docker dhi customization prepare golang 1.25 \ + --org my-org \ + --destination my-org/dhi-golang \ + --name "golang with git" \ + --output my-customization.yaml + +# Create a customization +$ docker dhi customization create my-customization.yaml --org my-org + +# List customizations +$ docker dhi customization list --org my-org + +# Filter customizations by name, repository, or source +$ docker dhi customization list --org my-org --filter git +$ docker dhi customization list --org my-org --repo dhi-golang +$ docker dhi customization list --org my-org --source golang + +# Get a customization +$ docker dhi customization get my-org/dhi-golang "golang with git" --org my-org --output my-customization.yaml + +# Update a customization +$ docker dhi customization edit my-customization.yaml --org my-org + +# Delete a customization +$ docker dhi customization delete my-org/dhi-golang "golang with git" --org my-org + +# Delete without confirmation prompt +$ docker dhi customization delete my-org/dhi-golang "golang with git" --org my-org --yes +``` + +{{< /tab >}} +{{< tab name="Terraform" >}} + +You can manage DHI customizations as infrastructure-as-code using the [DHI +Terraform +provider](https://registry.terraform.io/providers/docker-hardened-images/dhi/latest/docs). +If you haven't configured the provider yet, see the Terraform tab in [Mirror a +repository](./mirror.md) for setup instructions. + +Define a `dhi_customization` resource for each customization: + +```hcl +resource "dhi_customization" "golang_with_git" { + repository = "dhi-golang" + name = "golang with git" + + contents { + packages = ["git", "curl"] + } + + platform { + os = "linux" + architecture = "amd64" + } +} +``` + +The `dhi_customization` resource also supports optional configuration blocks +for `accounts`, `files`, `labels`, `annotations`, `environment`, `entrypoint`, +`cmd`, `user`, `workdir`, and `stop_signal`. + +Run `terraform apply` to create the customization. + +To edit a customization, update the resource configuration and run `terraform +apply`. To delete a customization, remove the resource and run `terraform apply`. + +For the full list of resource attributes, see the [Terraform Registry +documentation](https://registry.terraform.io/providers/docker-hardened-images/dhi/latest/docs/resources/customization). + +> [!NOTE] +> +> Monitoring customization builds is not available through the Terraform +> provider. Use the Docker Hub web interface or the DHI CLI to monitor builds. + +{{< /tab >}} +{{< /tabs >}} + +### Monitor customization builds + +{{< tabs >}} +{{< tab name="Docker Hub" >}} + +1. Sign in to [Docker Hub](https://hub.docker.com). +2. Select **My Hub**. +3. In the namespace drop-down, select your organization. +4. Select **Hardened Images** > **Manage**. +5. Select the **Customizations** tab. 
+ +{{< /tab >}} +{{< tab name="CLI" >}} + +List builds for a customization: + +```console +$ docker dhi customization build list my-org/dhi-golang "golang with git" --org my-org +``` + +Get details of a specific build: + +```console +$ docker dhi customization build get my-org/dhi-golang "golang with git" --org my-org +``` + +View build logs: + +```console +$ docker dhi customization build logs my-org/dhi-golang "golang with git" --org my-org +``` + +{{< /tab >}} +{{< /tabs >}} + +### Create an OCI artifact image for image customization + +An OCI artifact image is a Docker image that contains files or directories that +you want to include in your customized Docker Hardened Image (DHI). This can +include additional tools, libraries, or configuration files. + +When creating an image to use as an OCI artifact, it should ideally be as +minimal as possible and contain only the necessary files. + +For example, to distribute a custom root CA certificate as part of a trusted CA +bundle, you can use a multi-stage build. This approach registers your +certificate with the system and outputs an updated CA bundle, which can be +extracted into a minimal final image: + +```dockerfile +# syntax=docker/dockerfile:1 + +FROM dhi.io/bash:5-dev AS certs + +ENV DEBIAN_FRONTEND=noninteractive + +RUN mkdir -p /usr/local/share/ca-certificates/my-rootca +COPY certs/rootCA.crt /usr/local/share/ca-certificates/my-rootca + +RUN update-ca-certificates + +FROM scratch +COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +``` + +You can follow this pattern to create other OCI artifacts, such as images +containing tools or libraries that you want to include in your customized DHI. +Install the necessary tools or libraries in the first stage, and then copy the +relevant files to the final stage that uses `FROM scratch`. This ensures that +your OCI artifact is minimal and contains only the necessary files. + +In order for the OCI artifact to be available in a DHI customization, it must be built and +pushed to a repository in the same namespace as the mirrored DHI repository. + +If you're customizing a DHI for multiple platforms (such as `linux/amd64` and +`linux/arm64`), build your OCI artifact for all the platforms using the +`--platform` flag: + +```console +$ docker buildx build --platform linux/amd64,linux/arm64 \ + -t /my-oci-artifact:latest \ + --push . +``` + +This creates a single image manifest that you can use for each platform. The +customization build system automatically selects the correct platform variant +when building each customized image. + +> [!IMPORTANT] +> +> The customization UI will only allow you to select platforms that are +> available in all OCI artifacts you've added. If a platform is missing from +> any OCI artifact, you won't be able to select that platform for your +> customization. + +Once pushed to a repository in your organization's namespace, the OCI artifact +automatically appears in the customization workflow when you select OCI +artifacts to add to your customized Docker Hardened Image. + +#### Best practices for OCI artifacts + +Follow these best practices when creating OCI artifacts for DHI customizations: + +- Use multi-stage builds: Build or install dependencies in a builder stage, + then copy only the necessary files to a `FROM scratch` final stage. This keeps + the OCI artifact minimal and free of unnecessary build tools. + +- Include only essential files: OCI artifacts should contain only the files + you need to add to the customized image. 
Avoid including package managers, + shells, or other utilities that won't be used in the final image. + +- Match target platforms: Build your OCI artifact for all platforms you plan + to use in your customizations. Use `docker buildx build --platform` to create + multi-platform images when needed. + +- Use specific tags: Tag your OCI artifacts with specific versions or dates + (like `v1.0` or `20250101`) rather than relying solely on `latest`. This + ensures reproducible builds and makes it easier to track which artifacts are + used in which customizations. + +- Enable immutable tags: Consider enabling [immutable + tags](../../docker-hub/repos/manage/hub-images/immutable-tags.md) for your + OCI artifact repositories. This prevents accidental overwrites and ensures that + each version of your OCI artifact remains unchanged, improving reproducibility + and reliability of your customizations. + +## Customize a DHI Helm chart + +You can customize DHI Helm charts to meet your organization's specific needs. +Via the Docker Hub web interface, you can modify the image references to +reference mirrored images or customized images you've created. This lets you +create a custom, securely-built chart with references to images stored in Docker +Hub or other private registries. DHI securely packages customized Helm charts +that reference your repositories, wherever they are stored, by default. + +To customize image references, an organization owner must [mirror](./mirror.md) +the DHI chart repository to your organization on Docker Hub. + +You can create one chart customization per Helm chart repository. This is +different from image customizations, where you can create multiple +customizations per repository. If you need to make changes, you can edit your +existing customization. Alternatively, you can mirror the same Helm chart +repository again and add a new customization to the new mirror. + +> [!NOTE] +> +> You can customize Docker Hardened Image charts like any other Helm chart using +> standard Helm tools and practices, such as a `values.yaml` file, outside of +> Docker Hub. The following instructions describe how to customize image +> references for the chart using the Docker Hub web interface. + +To customize a Docker Hardened Image Helm chart after it has been mirrored: + +1. Sign in to [Docker Hub](https://hub.docker.com). +1. Select **My Hub**. +1. In the namespace drop-down, select your organization that has a mirrored DHI + repository. +1. Select **Hardened Images** > **Manage** > **Mirrored Helm charts**. +1. For the mirrored DHI repository you want to customize, select the **Name**. +1. Select the **Customizations** tab. +1. Select **Create customization**. + + At this point, the on-screen instructions will guide you through the + customization process. + +## Edit or delete a customization + +To edit or delete a DHI or chart customization, follow these steps: + +1. Sign in to [Docker Hub](https://hub.docker.com). +2. Select **My Hub**. +3. In the namespace drop-down, select your organization that has a mirrored repository. +4. Select **Hardened Images** > **Manage**. +5. Select **Customizations**. + +6. For the customized DHI repository you want to manage, select the menu icon in the far right column. + From here, you can: + + - **Edit**: Edit the customization. + - **Create new**: Create a new customization based on the source repository. + - **Delete**: Delete the customization. + +7. Follow the on-screen instructions to complete the edit or deletion. 
\ No newline at end of file diff --git a/content/manuals/dhi/how-to/explore.md b/content/manuals/dhi/how-to/explore.md new file mode 100644 index 00000000000..258c38ccd20 --- /dev/null +++ b/content/manuals/dhi/how-to/explore.md @@ -0,0 +1,200 @@ +--- +title: Search and evaluate Docker Hardened Images +linktitle: Search and evaluate +description: Learn how to find, compare, and evaluate Docker Hardened Images using the catalog on Docker Hub and Docker Scout comparison tools. +keywords: search docker images, image variants, docker hub catalog, compare docker images, docker scout compare, image comparison, vulnerability comparison +weight: 10 +aliases: + - /dhi/how-to/compare/ +--- + +Docker Hardened Images (DHI) are a curated set of secure, production-ready +container images designed to provide enhanced security, minimized attack +surfaces, and production-ready foundations for your applications. + +This page explains how to search available DHI repositories, review image +metadata, examine variant details, and compare images to evaluate security +improvements and differences. + +## Search the catalog + +You can browse, search, or filter images by category in the [Hardened Image +catalog](https://hub.docker.com/hardened-images/catalog) on Docker Hub. + +Alternatively, use the [DHI CLI](/reference/cli/docker/dhi/), included in +[Docker Desktop](/desktop/), to browse the catalog from the command line: + +```console +$ docker dhi catalog list +``` + +Filter by image type, name, or compliance requirements: + +```console +$ docker dhi catalog list --type image +$ docker dhi catalog list --filter python +$ docker dhi catalog list --fips +$ docker dhi catalog list --stig +``` + +### Repository details + +When you select a repository from the catalog, the repository details page +provides the following: + + - Overview: A brief explanation of the image. + - Guides: Several guides on how to use the image and migrate your existing application. + - Images: Select this option to [view image variants](#image-variants). + - Security summary: Select a tag name to view a quick security summary, + including package count, total known vulnerabilities, and Scout health score. + - Recently pushed tags: A list of recently updated image variants and when they + were last updated. + - Use this image: After selecting an image variant, you can select this option to + view instructions on how to pull and use the image variant. + +To view repository details from the command line, use the DHI CLI: + +```console +$ docker dhi catalog get python +``` + +This shows available tags, CVE counts, and other repository metadata. + +### Image variants + +Tags are used to identify image variants. Image variants are different builds of +the same application or framework tailored for different use-cases. + +From the [repository details](#repository-details), select **Images** to view +the available image variants. + +The **Images** page provides a table with the following columns: + +- Image version: The image name with its base distribution (for example, `debian + 13`) and associated tags. +- Type: The support lifecycle status of the variant. +- Compliance: Relevant compliance designations. For example, `CIS`, `FIPS`, or + `STIG (100%)`. +- Package manager: Whether a package manager is available. A checkmark indicates + a package manager is present (for example, `apt` or `apk`), a dash indicates + none. +- Shell: Whether a shell is available. 
A checkmark indicates a shell is present + (for example, `bash` or `busybox`), a dash indicates none. +- User: The user that the container runs as. For example, `root` or `nonroot + (65532)`. +- Last pushed: When the image variant was last updated. +- Vulnerabilities: Vulnerability counts by severity level. +- Health: The Scout health score. Select the score to view more details. + +### Image variant details + +On the [**Images** page](#image-variants), select an image version from the +table to view detailed information about that specific variant. + +The image variant details page provides the following information: + +- Packages: A list of all packages included in the image variant. This section + includes details about each package, including its name, version, + distribution, and licensing information. +- Specifications: The specifications for the image variant include the following + key details: + - Source & Build Information: The image is built from the Dockerfile found + here and the Git commit. + - Build parameters + - Entrypoint + - CMD + - User + - Working directory + - Environment Variables + - Labels + - Platform +- Vulnerabilities: The vulnerabilities section provides a list of known CVEs for + the image variant, including: + - CVE + - Severity + - Package + - Fix version + - Last detected + - Status + - Suppressed CVEs +- Attestations: Variants include comprehensive security attestations to verify + the image's build process, contents, and security posture. These attestations + are signed and can be verified using cosign. For a list of available + attestations, see [Attestations](../core-concepts/attestations.md). + +## Compare and evaluate images + +Docker Scout lets you analyze the differences between two images. Comparing a +DHI to a standard image helps you understand the security improvements, package +differences, and overall benefits of adopting hardened images. + +Comparison is useful for: + +- Evaluating the security improvements when migrating from a standard image to a + DHI +- Understanding package and vulnerability differences between image variants +- Assessing the impact of customizations or updates + +### Prerequisites + +Before comparing images: + +- Install [Docker Desktop](/desktop/) to use Docker Scout comparison features. +- Sign in to the registries containing the images you want to compare. Sign in + to `dhi.io` for Docker Hardened Images: + + ```console + $ docker login dhi.io + ``` + +### Basic comparison + +To compare a Docker Hardened Image with another image, use the [`docker scout +compare`](/reference/cli/docker/scout/compare/) command: + +```console +$ docker scout compare dhi.io/: \ + --to : \ + --platform +``` + +For example, to compare a DHI Node.js image with the official Node.js image: + +```console +$ docker scout compare dhi.io/node:22-debian13 \ + --to node:22 \ + --platform linux/amd64 +``` + +The output shows an overview at the top with key comparison metrics, followed by +detailed package and vulnerability information. Example overview: + +```console + ## Overview + + │ Analyzed Image │ Comparison Image + ────────────────────┼───────────────────────────────────────────────────────┼───────────────────────────────────────────── + Target │ dhi.io/node:22-debian13 │ node:22 + digest │ 55d471f61608 │ 9ee3220f602f + platform │ linux/amd64 │ linux/amd64 + vulnerabilities │ 0C 0H 0M 0L │ 0C 1H 3M 153L 4? 
+ │ -1 -3 -153 -4 │ + size │ 41 MB (-367 MB) │ 408 MB + packages │ 19 (-726) │ 745 +``` + +### Filter unchanged packages + +To focus only on the differences and ignore unchanged packages, use the +`--ignore-unchanged` flag: + +```console +$ docker scout compare dhi.io/node:22-debian13 \ + --to node:22 \ + --platform linux/amd64 \ + --ignore-unchanged +``` + +This output highlights only the packages and vulnerabilities that differ between +the two images, making it easier to identify the security improvements and +changes. diff --git a/content/manuals/dhi/how-to/hardened-packages.md b/content/manuals/dhi/how-to/hardened-packages.md new file mode 100644 index 00000000000..7f078fc6ccc --- /dev/null +++ b/content/manuals/dhi/how-to/hardened-packages.md @@ -0,0 +1,292 @@ +--- +title: Use Hardened System Packages +linkTitle: Use hardened packages +weight: 32 +keywords: hardened images, DHI, hardened packages, packages, alpine +description: Learn how to use and verify Docker's hardened system packages in your images. +--- + +Docker Hardened System Packages are built from source by Docker. This ensures +supply chain integrity throughout your entire image stack by eliminating risks +from potentially compromised public packages. + +Access to hardened packages varies by subscription: + +- **DHI Community**: Includes hardened packages in base images. Can configure the + public package repository to access the same packages in custom images. +- **DHI Select**: Includes all Community packages, plus access to additional + compliance-specific packages (such as FIPS variants) and Docker-patched + packages through the image customization UI. +- **DHI Enterprise**: Includes all Select packages, plus the ability to configure + the enterprise package repository directly in your own images for full access + to compliance and security-patched packages. + +## Built-in packages + +Supported distributions of Docker Hardened Images (DHI) automatically include +hardened system packages. No additional configuration is required. Simply pull +and use the images as normal. + +All packages in these images are built by Docker from source, maintaining +the same security standards as the base images themselves. + +## Add hardened packages to your images + +You can add hardened packages to your own images in the following two ways. + +### Add packages through image customization + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +When customizing Docker Hardened Images with DHI Select or DHI Enterprise, you +can add hardened packages for Alpine-based images through the customization +interface. Follow the steps to [create an image +customization](./customize.md#create-an-image-customization) and select hardened +packages during the customization process. + +### Configure the package manager + +You can configure your package manager to pull from Docker's hardened package +repositories. This lets you install hardened packages in your own images. + +#### Public repository + +To use Docker's public hardened package repository in your own images, configure +the Alpine package manager in your Dockerfile. + +The configuration process involves three steps: + +1. Install the [signing key](https://github.com/docker-hardened-images/keyring) +2. Configure the package repository +3. 
Update and install packages + +The following example shows how to configure the Alpine package manager in your +Dockerfile to use Docker's public hardened package repository: + +```dockerfile +FROM alpine:3.23 + +# Install the signing key +RUN cd /etc/apk/keys && \ + wget https://dhi.io/keyring/dhi-apk@docker-0F81AD7700D99184.rsa.pub + +# Replace the default repositories with the hardened package repository +RUN echo "https://dhi.io/apk/alpine/v3.23/main" > /etc/apk/repositories + +# Update and install packages +RUN apk update && \ + apk add libpng +``` + +Replace `3.23` with your Alpine version in both the base image tag and repository URL. + +To verify the configuration, build and run the image: + +```console +$ docker build -t myapp:latest . +$ docker run -it myapp:latest sh +``` + +Inside the container, check the configured repositories: + +```console +/ # cat /etc/apk/repositories +https://dhi.io/apk/alpine/v3.23/main +``` + +This ensures all packages are installed from Docker's hardened repository. + +All packages installed from the Docker Hardened Images repository are built from +source by Docker and include full provenance. + +#### Enterprise repository + +{{< summary-bar feature_name="Docker Hardened Images Enterprise" >}} + +With DHI Enterprise, you have access to an additional package +repository that includes hardened packages for compliance variants such as FIPS, +as well as additional security patches. + +The configuration process involves five steps: + +1. Install the [signing key](https://github.com/docker-hardened-images/keyring) +2. Configure the base package repository +3. Install the enterprise configuration package +4. Configure package installation with authentication +5. Build the image passing credentials as a secret using the DHI CLI + + > [!NOTE] + > + > You must have the Docker Hardened Images CLI installed and configured. For + > more information, see [Use the DHI CLI](./cli.md). + +The following example shows how to configure the Alpine package manager in your +Dockerfile to use Docker's enterprise hardened package repository: + +```dockerfile +FROM alpine:3.23 + +# Install the signing key +RUN cd /etc/apk/keys && \ + wget https://dhi.io/keyring/dhi-apk@docker-0F81AD7700D99184.rsa.pub + +# Replace the default repositories with the hardened package repository +RUN echo "https://dhi.io/apk/alpine/v3.23/main" > /etc/apk/repositories + +# Update and install the enterprise configuration package to add the security repository +RUN apk update && \ + apk add dhi-enterprise-conf + +# Install packages from the security repository with authentication +RUN --mount=type=secret,id=http_auth \ + HTTP_AUTH="$(cat /run/secrets/http_auth)" \ + apk update && \ + apk add openssl-fips +``` + +Build the image with authentication passed securely as a build secret: + +```console +$ dhictl auth apk > http_auth.txt +$ docker build --secret id=http_auth,src=http_auth.txt -t myapp-enterprise:latest . +$ rm http_auth.txt +``` + +The `--secret` flag securely mounts the authentication credentials during build +without storing them in the image layers or metadata. + +## Verify packages + +Every hardened package is cryptographically signed and includes metadata that +proves its provenance and build integrity. You can verify the signatures and +view the metadata to ensure your packages come from Docker's trusted build +infrastructure. 
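+
+For example, you can download a package with `apk fetch` and check it against
+the installed signing key with `apk verify`. This is a minimal sketch: it
+assumes the signing key and repository are already configured as shown above,
+and `libpng` is only an example package.
+
+```console
+# Refresh the index, download the package without installing it, then verify it
+$ apk update
+$ apk fetch libpng
+$ apk verify libpng-*.apk
+```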
+ +### View package metadata + +To view information about a hardened package, including its provenance: + +```console +$ apk info -L +``` + +This shows the files included in the package and its metadata. + +### Verify package signatures + +Hardened packages are cryptographically signed by Docker. When you install the +signing keys and configure your package manager as described previously, the +package manager automatically verifies signatures during installation. + +If a package fails signature verification, the package manager will refuse to +install it, protecting you from tampered or compromised packages. + +### Build provenance and cryptographic verification + +Docker hardened packages are built by Docker's trusted infrastructure and include +verifiable metadata and cryptographic signatures. + +To view this metadata for an installed package: + +```console +$ apk info -a +``` + +Or to view metadata for a package before installing: + +```console +$ apk fetch --stdout | tar -xzO .PKGINFO +``` + +The package signing keys ensure that packages haven't been tampered with after +being built. When you install the signing key and configure your package manager, +all packages are automatically verified before installation. + +### Package attestations + +Each hardened package includes its own attestations, similar to [image +attestations](./verify.md). These attestations provide provenance and build +information for individual packages, allowing you to trace the supply chain down +to the package level. + +You can retrieve package attestations by first extracting package information +from the image's SLSA provenance, then using the package digest to access its +attestations. + +#### Extract package information from image attestations + +To get provenance information for a specific package from an image's SLSA +provenance attestation, you first need to retrieve the image's provenance and +then filter for the specific package you're interested in. + +The SLSA provenance attestation includes a `materials` array that lists all +build inputs, including packages. You can use `jq` to filter this array for a +specific package: + +```console +$ docker scout attest get dhi.io/golang:1.26-alpine3.23 \ + --predicate-type https://slsa.dev/provenance/v0.2 | \ + jq '.predicate.materials[] | select( .uri == "https://dhi.io/apk/alpine/v3.23/main/aarch64/golang-1.26-1.26.0-r0.apk" )' +``` + +Replace the package URI in the `select()` filter with the specific package +you're looking for. You can find available packages by first running the command +without the `select()` filter to see all materials. + +This returns the package URI and its SHA-256 digest: + +```json +{ + "uri": "https://dhi.io/apk/alpine/v3.23/main/aarch64/golang-1.26-1.26.0-r0.apk", + "digest": { + "sha256": "4082a2500abc2e7b8435f9398d3514d760044fa52ca3d10cf80015469124a838" + } +} +``` + +#### List attestations for a package + +Using the package digest from the previous section, you can list all available +attestations for that package: + +```console +$ curl -s https://dhi.io/apk/alpine/v3.23/main/sha256:4082a2500abc2e7b8435f9398d3514d760044fa52ca3d10cf80015469124a838/attestations/list | jq . 
+``` + +This returns information about the package and its available attestations: + +```json +{ + "subject": { + "name": "pkg:apk/alpine/golang-1.26@1.26.0-r0?os_name=&os_version=", + "digest": { + "sha256": "4082a2500abc2e7b8435f9398d3514d760044fa52ca3d10cf80015469124a838" + } + }, + "attestations": [ + { + "predicate_type": "https://slsa.dev/provenance/v1", + "digest": { + "sha256": "97c919cf0edb27087739bbabeea4c1ef88d069cd41791476ba64b69280d63a32" + }, + "url": "https://dhi.io/apk/alpine/v3.23/main/sha256:4082a2500abc2e7b8435f9398d3514d760044fa52ca3d10cf80015469124a838/attestations/sha256:97c919cf0edb27087739bbabeea4c1ef88d069cd41791476ba64b69280d63a32" + } + ] +} +``` + +#### Retrieve package attestations + +To retrieve the actual attestation content, use the URL provided in the +attestation list: + +```console +$ curl -s https://dhi.io/apk/alpine/v3.23/main/sha256:4082a2500abc2e7b8435f9398d3514d760044fa52ca3d10cf80015469124a838/attestations/sha256:97c919cf0edb27087739bbabeea4c1ef88d069cd41791476ba64b69280d63a32 | jq . +``` + +This returns the full SLSA provenance attestation for the package, which +includes information about how the package was built, its dependencies, and +other build materials. + +You can continue this process recursively to trace the supply chain all the way +down to the compiler and other build tools used to create the package. diff --git a/content/manuals/dhi/how-to/helm.md b/content/manuals/dhi/how-to/helm.md new file mode 100644 index 00000000000..6fac70ba5d1 --- /dev/null +++ b/content/manuals/dhi/how-to/helm.md @@ -0,0 +1,138 @@ +--- +title: Use a Docker Hardened Image chart +linktitle: Use a Helm chart +description: Learn how to use a Docker Hardened Image chart. +keywords: use hardened image, helm, k8s, kubernetes, dhi chart, chart +weight: 31 +--- + +Docker Hardened Image (DHI) charts are Docker-provided [Helm +charts](https://helm.sh/docs/) built from upstream sources, designed for +compatibility with Docker Hardened Images. These charts are available as OCI +artifacts within the DHI catalog on Docker Hub. For more details, see [Docker +Hardened Image charts](/dhi/features/helm/). + +DHI charts incorporate multiple layers of supply chain security that aren't present in upstream charts: + +- SLSA Level 3 compliance: Each chart is built with SLSA Build Level 3 standards, including detailed build provenance +- Software Bill of Materials (SBOMs): Comprehensive SBOMs detail all components referenced within the chart +- Cryptographic signing: All associated metadata is cryptographically signed by Docker for integrity and authenticity +- Hardened configuration: Charts automatically reference Docker Hardened Images for secure deployments +- Tested compatibility: Charts are robustly tested to work out-of-the-box with Docker Hardened Images + +You can use a DHI chart like any other Helm chart stored in an OCI registry. +When you have a Docker Hardened Images subscription, you can also customize DHI +charts to reference customized images and mirrored repositories. The customized +chart build pipeline ensures that your customizations are built securely, use +the latest base charts, and include attestations. + +## Find a Docker Helm chart + +To find a Docker Helm chart for DHI: + +1. Go to the Hardened Images catalog in [Docker Hub](https://hub.docker.com/hardened-images/catalog) and sign in. +2. In the left sidebar, select **Hardened Images** > **Catalog**. +3. Select **Filter by** for **Helm Charts**. +4. Select a Helm chart repository to view its details. 
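+
+If you prefer the command line, you can also inspect a chart's metadata and
+default values before you install it. The following is a minimal sketch:
+`<chart-repository>` and `<version>` are placeholders, and it assumes you have
+already signed in to `dhi.io` with `helm registry login` as described in
+[Install a Helm chart](#install-a-helm-chart).
+
+```console
+# Show the chart metadata and its default values for a specific version
+$ helm show chart oci://dhi.io/<chart-repository> --version <version>
+$ helm show values oci://dhi.io/<chart-repository> --version <version>
+```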
+ +## Mirror a Helm chart and/or its images to a third-party registry + +If you want to mirror to your own third-party registry, you can follow the +instructions in [Mirror a Docker Hardened Image repository](/dhi/how-to/mirror/) for either the +chart, the image, or both. + +The same `regctl` tool that is used for mirroring container images can also be used for mirroring Helm charts, as Helm +charts are OCI artifacts. + +For example: + +```console +regctl image copy \ + "${SRC_CHART_REPO}:${TAG}" \ + "${DEST_REG}/${DEST_CHART_REPO}:${TAG}" \ + --referrers \ + --referrers-src "${SRC_ATT_REPO}" \ + --referrers-tgt "${DEST_REG}/${DEST_CHART_REPO}" \ + --force-recursive +``` + +## Create a Kubernetes secret for pulling images + +You need to create a Kubernetes secret for pulling images from `dhi.io`, Docker +Hub, or your own registry. This is necessary because Docker Hardened Image +repositories require authentication. If you mirror the images to your own +registry, you still need to create this secret if the registry requires +authentication. + +1. For `dhi.io` or Docker Hub, create a [personal access token + (PAT)](/security/access-tokens/) using your Docker account or an + [organization access token (OAT)](/enterprise/security/access-tokens/). + Ensure the token has at least read-only access to the Docker Hardened Image + repositories. +2. Create a secret in Kubernetes using the following command. Replace ``, ``, + ``, and `` with your own values. + + > [!NOTE] + > + > You need to create this secret in each Kubernetes namespace that uses a + > DHI. If you've mirror your DHIs to another registry, replace + > `dhi.io` with your registry's hostname. Replace + > ``, ``, and `` with your own + > values. `` is Docker ID if using a PAT or your organization + > name if using an OAT. `` is a name you choose for the + > secret. + + ```console + $ kubectl create secret docker-registry \ + --docker-server=dhi.io \ + --docker-username= \ + --docker-password= \ + --docker-email= + ``` + + For example: + + ```console + $ kubectl create secret docker-registry dhi-pull-secret \ + --docker-server=dhi.io \ + --docker-username=docs \ + --docker-password=dckr_pat_12345 \ + --docker-email=moby@example.com + ``` + +## Install a Helm chart + +To install a Helm chart from Docker Hardened Images: + +1. Sign in to the registry using Helm: + + ```console + $ echo $ACCESS_TOKEN | helm registry login dhi.io --username --password-stdin + ``` + + Replace `` and set `$ACCESS_TOKEN`. + +2. Install the chart using `helm install`. Optionally, you can also use the `--dry-run` flag to test the installation without + actually installing anything. + + + ```console + $ helm install oci://dhi.io/ --version \ + --set "imagePullSecrets[0].name=" + ``` + + Replace `` and `` accordingly. If the + chart is in your own registry or another repository, replace + `dhi.io/` with your own location. Replace + `` with the name of the image pull secret created + from [Create a Kubernetes secret for pulling images](#create-a-kubernetes-secret-for-pulling-images). + +## Customize a Helm chart + +You can customize Docker Hardened Image Helm charts to reference customized +images and mirrored repositories. For more details, see [Customize Docker +Hardened Images and charts](./customize.md). + +## Verify a Helm chart and view its attestations + +You can verify Helm charts. For more details, see [Verify Helm chart attestations](./verify.md#verify-helm-chart-attestations-with-docker-scout). 
\ No newline at end of file diff --git a/content/manuals/dhi/how-to/mirror.md b/content/manuals/dhi/how-to/mirror.md new file mode 100644 index 00000000000..d217c00d453 --- /dev/null +++ b/content/manuals/dhi/how-to/mirror.md @@ -0,0 +1,452 @@ +--- +title: Mirror a Docker Hardened Image repository +linktitle: Mirror a repository +description: Learn how to mirror an image into your organization's namespace and optionally push it to another private registry. +weight: 20 +keywords: mirror docker image, private container registry, docker hub automation, webhook image sync, secure image distribution, internal registry, jfrog artifactory, harbor registry, amazon ecr, google artifact registry, github container registry, terraform, infrastructure as code +--- + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +Mirroring requires a DHI Select or Enterprise subscription. Without a +subscription, you can pull Docker Hardened Images directly from `dhi.io` without +mirroring. With a DHI Select or Enterprise subscription, you must mirror to your +organization to get: + +- Compliance variants (FIPS-enabled or STIG-ready images) +- Extended Lifecycle Support (ELS) variants (requires add-on) +- Image or Helm chart customization +- Air-gapped or restricted network environments +- [SLA-backed security updates](https://docs.docker.com/go/dhi-sla/) + +## How to mirror + +This topic covers two types of mirroring for Docker Hardened Image (DHI) +repositories: + +- [Mirror to your organization](#mirror-a-dhi-repository-to-your-organization): + Mirror a DHI repository to your organization's namespace on Docker Hub. + +- [Mirror to a third-party + registry](#mirror-a-dhi-repository-to-a-third-party-registry): Mirror a + repository to another container registry, such as Amazon ECR, Google Artifact + Registry, or a private Harbor instance. + +## Mirror a DHI repository to your organization + +To mirror repositories, you must be an organization owner or editor, or use a +personal access token (PAT) or organization access token (OAT). See the CLI and +Terraform tabs in the following sections for required permission scopes. + +- Image repositories: Mirroring lets you customize images by adding packages, + OCI artifacts (such as custom certificates or additional tools), environment + variables, labels, and other configuration settings. For more details, see + [Customize a Docker Hardened Image](./customize.md#customize-a-docker-hardened-image). + +- Chart repositories: Mirroring lets you customize image references within + the chart. This is particularly useful when using customized images or when + you've mirrored images to a third-party registry and need the chart to + reference those custom locations. For more details, see [Customize a Docker + Hardened Helm chart](./customize.md#customize-a-docker-hardened-helm-chart). + +{{< tabs >}} +{{< tab name="Docker Hub" >}} + +1. Go to [Docker Hub](https://hub.docker.com) and sign in. +2. Select **My Hub**. +3. In the namespace drop-down, select your organization. +4. Select **Hardened Images** > **Catalog**. +5. Select a DHI repository to view its details. +6. Mirror the repository: + - To mirror an image repository, select **Use this image** > **Mirror + repository**, and then follow the on-screen instructions. If you have the ELS add-on, you can also + select **Enable support for end-of-life versions**. + - To mirror a Helm chart repository, select **Get Helm chart**, and then follow the on-screen instructions. 
+ +It may take a few minutes for all the tags to finish mirroring. + +{{< /tab >}} +{{< tab name="CLI" >}} + +Authenticate with `docker login` using your Docker credentials, a [personal +access token (PAT)](../../security/access-tokens.md) with **Read & Write** +permissions, or an [organization access token +(OAT)](../../enterprise/security/access-tokens.md). When using an OAT, the +available operations depend on the token's permission scope: + +- To list mirrored repositories, the OAT must have read (pull) access to the + relevant repositories. Results are scoped to repositories the OAT can access. +- To create a mirror to an existing destination repository, the OAT must have + push access to that repository. To create a mirror to a new destination + repository that doesn't yet exist, the OAT must have org-wide repository + access (for example, `/*` with pull or push). Repository-scoped access to + the future repository name is not sufficient. +- To stop mirroring, the OAT must have push access to the relevant repository. +- OATs with public repository read-only access cannot list or manage mirrored + repositories. + +Use the [`docker dhi mirror`](/reference/cli/docker/dhi/mirror/) command: + +```console +$ docker dhi mirror start --org my-org \ + -r dhi/golang,my-org/dhi-golang \ + -r dhi/nginx,my-org/dhi-nginx \ + -r dhi/prometheus-chart,my-org/dhi-prometheus-chart +``` + +Mirror with dependencies: + +```console +$ docker dhi mirror start --org my-org -r dhi/golang,my-org/dhi-golang --dependencies +``` + +List mirrored images in your organization: + +```console +$ docker dhi mirror list --org my-org +``` + +Filter mirrored images by name or type: + +```console +$ docker dhi mirror list --org my-org --filter python +$ docker dhi mirror list --org my-org --type image +$ docker dhi mirror list --org my-org --type helm-chart +``` + +{{< /tab >}} +{{< tab name="Terraform" >}} + +You can manage DHI mirrors as infrastructure-as-code using the [DHI Terraform +provider](https://registry.terraform.io/providers/docker-hardened-images/dhi/latest/docs). + +First, install and configure the provider: + +```hcl +terraform { + required_providers { + dhi = { + source = "docker-hardened-images/dhi" + } + } +} + +provider "dhi" { + docker_hub_username = var.docker_username + docker_hub_password = var.docker_password + organization = var.org_name +} +``` + +> [!NOTE] +> +> Instead of specifying credentials in the provider block, you can set the +> `DOCKER_USERNAME`, `DOCKER_PASSWORD`, and `DHI_ORG` environment variables. You +> can also authenticate using an organization access token (OAT) in place of a +> password. Set `DOCKER_USERNAME` to your organization namespace and +> `DOCKER_PASSWORD` to the OAT. When using an OAT, the same permission scopes +> apply as with the CLI: read (pull) access is required to list mirrors, and +> push access is required to create or delete them. + +Then, define a `dhi_mirror` resource for each repository you want to mirror: + +```hcl +resource "dhi_mirror" "golang" { + source_namespace = "dhi" + source_name = "golang" + destination_name = "dhi-golang" +} + +resource "dhi_mirror" "nginx" { + source_namespace = "dhi" + source_name = "nginx" + destination_name = "dhi-nginx" +} +``` + +To enable Extended Lifecycle Support (ELS) variants, set the `els` attribute: + +```hcl +resource "dhi_mirror" "golang" { + source_namespace = "dhi" + source_name = "golang" + destination_name = "dhi-golang" + els = true +} +``` + +Run `terraform apply` to create the mirrors. 
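+
+For example, a minimal run that authenticates through the environment
+variables described in the note above might look like the following. The
+values shown are placeholders; use a real access token and your own
+organization namespace.
+
+```console
+# Provider credentials, as an alternative to setting them in the provider block
+$ export DOCKER_USERNAME="my-org-admin"
+$ export DOCKER_PASSWORD="dckr_pat_example"
+$ export DHI_ORG="my-org"
+
+# Initialize the provider, review the plan, and create the mirrors
+$ terraform init
+$ terraform plan
+$ terraform apply
+```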
+ +For the full list of resource attributes, see the [Terraform Registry +documentation](https://registry.terraform.io/providers/docker-hardened-images/dhi/latest/docs/resources/mirror). + +{{< /tab >}} +{{< /tabs >}} + +After mirroring, the repository appears in your organization's repository list, +prefixed by `dhi-`, and continues to receive updated images. It behaves like any +other Docker Hub repository, so you can manage access and permissions, configure +webhooks, and use other standard Hub features. See [Docker Hub +repositories](/manuals/docker-hub/repos/_index.md) for details. + +### Stop mirroring a repository + +After you stop mirroring, the repository remains, but it no longer receives +updates. You can still use the last images or charts that were mirrored. + +> [!NOTE] +> +> If you only want to stop mirroring ELS versions, you can clear the ELS +> option in the mirrored repository's **Settings** tab. + +{{< tabs >}} +{{< tab name="Docker Hub" >}} + +1. Go to [Docker Hub](https://hub.docker.com) and sign in. +2. Select **My Hub**. +3. In the namespace drop-down, select your organization that has access to DHI. +4. Select **Hardened Images** > **Manage**. +5. Select the **Mirrored Images** or **Mirrored Helm charts** tab. +6. In the far right column of the repository you want to stop mirroring, select the menu icon. +7. Select **Stop mirroring**. + +{{< /tab >}} +{{< tab name="CLI" >}} + +Authenticate with `docker login` using your Docker credentials, a [personal +access token (PAT)](../../security/access-tokens.md) with **Read & Write** +permissions, or an [organization access token +(OAT)](../../enterprise/security/access-tokens.md) with push access to the +relevant repository. + +Use the [`docker dhi mirror`](/reference/cli/docker/dhi/mirror/) command: + +```console +$ docker dhi mirror stop --org my-org dhi-golang +``` + +{{< /tab >}} +{{< tab name="Terraform" >}} + +To stop mirroring, remove the `dhi_mirror` resource from your Terraform +configuration and run `terraform apply`. The repository remains in your +organization but no longer receives updates. + +{{< /tab >}} +{{< /tabs >}} + +## Mirror a DHI repository to a third-party registry + +After mirroring a DHI repository to your organization on Docker Hub, you can +optionally mirror it to another container registry, such as Amazon ECR, Google +Artifact Registry, GitHub Container Registry, or a private Harbor instance. + +You can use any standard workflow to mirror the image, such as the +[Docker CLI](/reference/cli/docker/), [Docker Hub Registry +API](/reference/api/registry/latest/), third-party registry tools, or CI/CD +automation. + +However, to preserve the full security context, including attestations, you must +also mirror its associated OCI artifacts. DHI repositories store the image +layers on `dhi.io` (or `docker.io` for customized images) and the signed +attestations in a separate registry (`registry.scout.docker.com`). + +To copy both, you can use [`regctl`](https://regclient.org/cli/regctl/), an +OCI-aware CLI that supports mirroring images along with attached artifacts such +as SBOMs, vulnerability reports, and SLSA provenance. For ongoing synchronization, +you can use [`regsync`](https://regclient.org/cli/regsync/). + +### Automate syncing with webhooks + +To keep external registries or systems in sync with your mirrored Docker +Hardened Images, and to receive notifications when updates occur, you can +configure a [webhook](/docker-hub/repos/manage/webhooks/) on the mirrored +repository in Docker Hub. 
A webhook sends a `POST` request to a URL you define +whenever a new image tag is pushed or updated. + +For example, you might configure a webhook to call a CI/CD system at +`https://ci.example.com/hooks/dhi-sync` whenever a new tag is mirrored. The +automation triggered by this webhook can pull the updated image from Docker Hub +and push it to an internal registry such as Amazon ECR, Google Artifact +Registry, or GitHub Container Registry. + +Other common webhook use cases include: + +- Triggering validation or vulnerability scanning workflows +- Signing or promoting images +- Sending notifications to downstream systems + +#### Example webhook payload + +When a webhook is triggered, Docker Hub sends a JSON payload like the following: + +```json{collapse=true} +{ + "callback_url": "https://registry.hub.docker.com/u/exampleorg/dhi-python/hook/abc123/", + "push_data": { + "pushed_at": 1712345678, + "pusher": "trustedbuilder", + "tag": "3.13-alpine3.21" + }, + "repository": { + "name": "dhi-python", + "namespace": "exampleorg", + "repo_name": "exampleorg/dhi-python", + "repo_url": "https://hub.docker.com/r/exampleorg/dhi-python", + "is_private": true, + "status": "Active", + ... + } +} +``` + +### Example mirroring with `regctl` + +The following example shows how to mirror a specific tag of a Docker Hardened +Image from Docker Hub to another registry, along with its associated +attestations using `regctl`. You must [install +`regctl`](https://github.com/regclient/regclient) first. + +The example assumes you have mirrored the DHI repository to your organization's +namespace on Docker Hub as described in the previous section. You can apply the +same steps to a non-mirrored image by updating the `SRC_ATT_REPO` and +`SRC_REPO` variables accordingly. + +1. Set environment variables for your specific environment. Replace the + placeholders with your actual values. + + In this example, you use a Docker username to represent a member of the Docker + Hub organization that the DHI repositories are mirrored in. Prepare a + [personal access token (PAT)](../../security/access-tokens.md) for the user + with `read only` access. Alternatively, you can use your organization name and + an [organization access token + (OAT)](../../enterprise/security/access-tokens.md) to authenticate with `docker.io`. + Note that OATs are not supported for `registry.scout.docker.com`. If your + workflow requires authenticating to the Scout registry, use a personal access + token (PAT) for that step. + + > [!WARNING] + > + > The following examples export credentials directly on the command line for + > demonstration purposes. This exposes sensitive tokens in your shell history + > and process list. In production environments, use secure methods such as + > reading from files with restricted permissions, environment files loaded + > at runtime, or secret management tools. + + ```console + $ export DOCKER_USERNAME="YOUR_DOCKER_USERNAME" + $ export DOCKER_PAT="YOUR_DOCKER_PAT" + $ export DOCKER_ORG="YOUR_DOCKER_ORG" + $ export DEST_REG="registry.example.com" + $ export DEST_REPO="mirror/dhi-python" + $ export DEST_REG_USERNAME="YOUR_DESTINATION_REGISTRY_USERNAME" + $ export DEST_REG_TOKEN="YOUR_DESTINATION_REGISTRY_TOKEN" + $ export SRC_REPO="docker.io/${DOCKER_ORG}/dhi-python" + $ export SRC_ATT_REPO="registry.scout.docker.com/${DOCKER_ORG}/dhi-python" + $ export TAG="3.13-alpine3.21" + ``` + +2. Sign in via `regctl` to Docker Hub, the Scout registry that contains + the attestations, and your destination registry. 
+ + ```console + $ echo $DOCKER_PAT | regctl registry login -u "$DOCKER_USERNAME" --pass-stdin docker.io + $ echo $DOCKER_PAT | regctl registry login -u "$DOCKER_USERNAME" --pass-stdin registry.scout.docker.com + $ echo $DEST_REG_TOKEN | regctl registry login -u "$DEST_REG_USERNAME" --pass-stdin "$DEST_REG" + ``` + +3. Mirror the image and attestations using `--referrers` and referrer endpoints: + + ```console + $ regctl image copy \ + "${SRC_REPO}:${TAG}" \ + "${DEST_REG}/${DEST_REPO}:${TAG}" \ + --referrers \ + --referrers-src "${SRC_ATT_REPO}" \ + --referrers-tgt "${DEST_REG}/${DEST_REPO}" \ + --force-recursive + ``` + +4. Verify that artifacts were preserved. + + First, get a digest for a specific tag and platform. For example, `linux/amd64`. + + ```console + DIGEST="$(regctl manifest head "${DEST_REG}/${DEST_REPO}:${TAG}" --platform linux/amd64)" + ``` + + List attached artifacts (SBOM, provenance, VEX, vulnerability reports). + + ```console + $ regctl artifact list "${DEST_REG}/${DEST_REPO}@${DIGEST}" + ``` + + Or, list attached artifacts with `docker scout`. + + ```console + $ docker scout attest list "registry://${DEST_REG}/${DEST_REPO}@${DIGEST}" + ``` + +### Example ongoing mirroring with `regsync` + +`regsync` automates pulling from your organizations mirrored DHI repositories on +Docker Hub and pushing to your external registry including attestations. It +reads a YAML configuration file and can filter tags. + +The following example uses a `regsync.yaml` file that syncs Node 24 and Python +3.12 Debian 13 variants, excluding Alpine and Debian 12. + +```yaml{title="regsync.yaml",collapse=true} +version: 1 +# Optional: inline creds if not relying on prior CLI logins +# creds: +# - registry: docker.io +# user: +# pass: "{{file \"/run/secrets/docker_token\"}}" +# - registry: registry.scout.docker.com +# user: +# pass: "{{file \"/run/secrets/docker_token\"}}" +# - registry: registry.example.com +# user: +# pass: "{{file \"/run/secrets/dest_token\"}}" + +sync: + - source: docker.io//dhi-node + target: registry.example.com/mirror/dhi-node + type: repository + fastCopy: true + referrers: true + referrerSource: registry.scout.docker.com//dhi-node + referrerTarget: registry.example.com/mirror/dhi-node + tags: + allow: [ "24.*" ] + deny: [ ".*alpine.*", ".*debian12.*" ] + + - source: docker.io//dhi-python + target: registry.example.com/mirror/dhi-python + type: repository + fastCopy: true + referrers: true + referrerSource: registry.scout.docker.com//dhi-python + referrerTarget: registry.example.com/mirror/dhi-python + tags: + allow: [ "3.12.*" ] + deny: [ ".*alpine.*", ".*debian12.*" ] +``` + +To do a dry run with the configuration file, you can run the following command. +You must [install `regsync`](https://github.com/regclient/regclient) first. + +```console +$ regsync check -c regsync.yaml +``` + +To run the sync with the configuration file: + +```console +$ regsync once -c regsync.yaml +``` + +## What next + +After mirroring, see [Pull a DHI](./use.md#pull-a-dhi) to learn how to pull and use mirrored images. diff --git a/content/manuals/dhi/how-to/policies.md b/content/manuals/dhi/how-to/policies.md new file mode 100644 index 00000000000..80f9589bc02 --- /dev/null +++ b/content/manuals/dhi/how-to/policies.md @@ -0,0 +1,115 @@ +--- +title: Enforce Docker Hardened Image usage with policies +linktitle: Enforce image usage +description: Learn how to use image policies with Docker Scout for Docker Hardened Images. 
+weight: 50 +keywords: docker scout policies, enforce image compliance, container security policy, image provenance, vulnerability policy check +--- + +When you have a Docker Hardened Images Enterprise subscription, mirroring a +Docker Hardened Image (DHI) repository automatically enables [Docker +Scout](/scout/), allowing you to start enforcing security and compliance +policies for your images without additional setup. Using Docker Scout policies, +you can define and apply rules that ensure only approved and secure images, such +as those based on DHIs, are used across your environments. + +Docker Scout includes a dedicated [**Valid Docker Hardened Image (DHI) or DHI +base +image**](../../scout/policy/_index.md#valid-docker-hardened-image-dhi-or-dhi-base-image) +policy type that validates whether your images are Docker Hardened Images or are +built using a DHI as the base image. This policy checks for valid Docker signed +verification summary attestations. + +With policy evaluation built into Docker Scout, you can monitor image compliance +in real time, integrate checks into your CI/CD workflows, and maintain +consistent standards for image security and provenance. + +## View existing policies + +To see the current policies applied to a mirrored DHI repository: + +1. Go to the mirrored DHI repository in [Docker Hub](https://hub.docker.com). +2. Select **View on Scout**. + + This opens the [Docker Scout dashboard](https://scout.docker.com), where you + can see which policies are currently active and whether your images meet the + policy criteria. + +Docker Scout automatically evaluates policy compliance when new images are +pushed. Each policy includes a compliance result and a link to the affected +images and layers. + +## Evaluate DHI policy compliance for your images + +When you enable Docker Scout for your repositories, you can configure the +[**Valid Docker Hardened Image (DHI) or DHI base +image**](../../scout/policy/_index.md#valid-docker-hardened-image-dhi-or-dhi-base-image) +policy. This optional policy validates whether your images are DHIs or built with DHI +base images by checking for Docker signed verification summary attestations. + +The following example shows how to build an image using a DHI base image and +evaluate its compliance with the DHI policy. + +### Example: Build and evaluate a DHI-based image + +#### Step 1: Use a DHI base image in your Dockerfile + +Create a Dockerfile that uses a Docker Hardened Image mirrored repository as the +base. For example: + +```dockerfile +# Dockerfile +FROM /dhi-python:3.13-alpine3.21 + +ENTRYPOINT ["python", "-c", "print('Hello from a DHI-based image')"] +``` + +#### Step 2: Build and push the image + +Open a terminal and navigate to the directory containing your Dockerfile. Then, +build and push the image to your Docker Hub repository: + +```console +$ docker build \ + --push \ + -t /my-dhi-app:v1 . +``` + +#### Step 3: Enable Docker Scout + +To enable Docker Scout for your organization and the repository, run the +following commands in your terminal: + +```console +$ docker login +$ docker scout enroll +$ docker scout repo enable --org /my-dhi-app +``` + +#### Step 4: Configure the DHI policy + +Once Docker Scout is enabled, you can configure the **Valid Docker Hardened +Image (DHI) or DHI base image** policy for your organization: + +1. Go to the [Docker Scout dashboard](https://scout.docker.com). +2. Select your organization and navigate to **Policies**. +3. 
Configure the **Valid Docker Hardened Image (DHI) or DHI base image** policy + to enable it for your repositories. + +For more information on configuring policies, see +[Configure policies](../../scout/policy/configure.md). + +#### Step 5: View policy compliance + +Once the DHI policy is configured and active, you can view compliance results: + +1. Go to the [Docker Scout dashboard](https://scout.docker.com). +2. Select your organization and navigate to **Images**. +3. Find your image, `/my-dhi-app:v1`, and select the link in the **Compliance** column. + +This shows the policy compliance results for your image. The **Valid Docker +Hardened Image (DHI) or DHI base image** policy evaluates whether your image has +a valid Docker signed verification summary attestation or if its base image has +such an attestation. + +You can now [evaluate policy compliance in your CI](/scout/policy/ci/). \ No newline at end of file diff --git a/content/manuals/dhi/how-to/scan.md b/content/manuals/dhi/how-to/scan.md new file mode 100644 index 00000000000..3969b5e44ad --- /dev/null +++ b/content/manuals/dhi/how-to/scan.md @@ -0,0 +1,428 @@ +--- +title: Scan Docker Hardened Images +linktitle: Scan an image +description: Learn how to scan Docker Hardened Images for known vulnerabilities using Docker Scout, Grype, Trivy, Wiz, or Mend.io. +keywords: scan container image, docker scout cves, grype scanner, trivy container scanner, mend.io, vex attestation +weight: 46 +--- + +Docker Hardened Images (DHIs) are designed to be secure by default, but like any +container image, it's important to scan them regularly as part of your +vulnerability management process. + +## Scan with OpenVEX-compliant scanners + +To get accurate vulnerability assessments, use scanners that support +[VEX](/manuals/dhi/core-concepts/vex.md) attestations. The following scanners can +read and apply the VEX statements included with Docker Hardened Images: + +- [Docker Scout](#docker-scout): Automatically applies VEX statements with zero configuration +- [Trivy](#trivy): Supports VEX through VEX Hub or local VEX files +- [Grype](#grype): Supports VEX via the `--vex` flag +- [Wiz](#wiz): Automatically applies VEX statements with + zero configuration +- [Mend.io](#mendio): Automatically applies VEX statements with + zero configuration + +For guidance on choosing the right scanner and understanding the differences +between VEX-enabled and non-VEX scanners, see [Scanner +integrations](/manuals/dhi/explore/scanner-integrations.md). + +## Docker Scout + +Docker Scout is integrated into Docker Desktop and the Docker CLI. It provides +vulnerability insights, CVE summaries, and direct links to remediation guidance. + +### Scan a DHI using Docker Scout + +To scan a Docker Hardened Image using Docker Scout, run the following +command: + +```console +$ docker login dhi.io +$ docker scout cves dhi.io/: --platform +``` + +Example output: + +```plaintext + v SBOM obtained from attestation, 101 packages found + v Provenance obtained from attestation + v VEX statements obtained from attestation + v No vulnerable package detected + ... +``` + +For more detailed filtering and JSON output, see [Docker Scout CLI reference](/reference/cli/docker/scout/). + +### Build child images with provenance attestations + +When you build a custom image that uses a Docker Hardened Image as its base, you must build with `--provenance=mode=max` and `--sbom=true` so that Docker Scout can trace the base image lineage and correctly apply VEX statements. 
+ +Without these flags, Docker Scout cannot identify the DHI base image in +the provenance chain. As a result, it reports CVEs that are already suppressed +by VEX statements in the base image, producing false CVE positives in your +scan results. + +> [!NOTE] +> **Why provenance attestation is required** +> +> Docker Scout uses max-mode provenance attestations to identify the DHI base image +> and track its lineage. A cryptographically signed provenance attestation ensures that +> base image lineage is verified and tamper-resistant, giving Docker Scout the trust +> anchor it needs to correctly apply VEX statements from the base image. + +To build with maximum provenance and SBOM attestations: + +```console +$ docker build \ + --provenance=mode=max \ + --sbom=true \ + --push \ + -t docker.io//: . +``` + +After building with these flags, Docker Scout reads the full provenance +chain, matches the DHI base image, and applies its VEX statements. Scans of +your child image then reflect the correct suppressed CVEs, giving you an +accurate vulnerability assessment. + +### VEX attestations in child images + +If you introduce new layers in your child image and want to suppress CVEs in those layers, you can attach your own VEX attestation to the child image independently, you do not need to duplicate or aggregate the VEX statements from the DHI base image. + +When `docker scout cves` runs against your child image, Scout reads VEX attestations from the full provenance chain and applies them cumulatively: + +- **Base image VEX** - attached to the DHI, applied to CVEs in base image layers +- **Child image VEX** - attached to your image, applied to CVEs in layers you introduced + +For example, if you add a `requests` layer to a DHI Python base image and attach a VEX statement suppressing `CVE-2024-47081`, Scout applies both VEX attestations independently and attributes each to its respective author: + +```text +✓ VEX statements obtained from attestation +CVE-2024-47081 VEX: not affected [vulnerable code not present] : +``` + +Scout suppresses CVEs from the DHI base VEX and CVEs from your child VEX in the same scan - no aggregate VEX document is required. + +To create and attach a VEX attestation to your child image: + +```bash +cat > child-vex.json << 'EOF' +{ + "@context": "https://openvex.dev/ns/v0.2.0", + "@id": "https:///vex//1", + "author": "", + "timestamp": "", + "version": 1, + "statements": [ + { + "vulnerability": { + "name": "" + }, + "products": [ + { + "@id": "pkg:pypi/@" + } + ], + "status": "not_affected", + "justification": "vulnerable_code_not_present" + } + ] +} +EOF + +docker scout attestation add \ + --file child-vex.json \ + --predicate-type https://openvex.dev/ns/v0.2.0 \ + docker.io//: +``` + +> [!NOTE] +> This is only possible because you built with `--provenance=mode=max`. Without the full +> provenance chain, Scout cannot traverse back to the base image to retrieve its VEX attestations. + +### Automate DHI scanning in CI/CD with Docker Scout + +Integrating Docker Scout into your CI/CD pipeline enables you to automatically +verify that images built from Docker Hardened Images remain free from known +vulnerabilities during the build process. This proactive approach ensures the +continued security integrity of your images throughout the development +lifecycle. 
+ +#### Example GitHub Actions workflow + +The following is a sample GitHub Actions workflow that builds an image, scans it and pushes to the registry only if the scan passes: + +```yaml {collapse="true"} +name: DHI Vulnerability Scan + +on: + push: + branches: + - main + pull_request: + +env: + REGISTRY: docker.io + IMAGE_NAME: ${{ github.repository }} + SHA: ${{ github.event.pull_request.head.sha || github.event.after }} + +jobs: + scan: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + pull-requests: write + steps: + - name: Checkout repository + uses: actions/checkout@{{% param "checkout_action_version" %}} + + - name: Set up Docker with containerd image store + uses: docker/setup-docker-action@{{% param "setup_docker_action_version" %}} + with: + daemon-config: | + { + "features": { + "containerd-snapshotter": true + } + } + + - name: Log in to Docker Hub + uses: docker/login-action@{{% param "login_action_version" %}} + with: + registry: ${{ env.REGISTRY }} + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Build + uses: docker/build-push-action@{{% param "build_push_action_version" %}} + with: + context: . + sbom: true + tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.SHA }} + + - name: Run Docker Scout CVE scan + uses: docker/scout-action@v1 + with: + command: cves + image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.SHA }} + only-severities: critical,high + exit-code: true + + - name: Push image + if: success() + run: | + docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.SHA }} +``` + +The `exit-code: true` parameter ensures that the workflow fails if any critical or +high-severity vulnerabilities are detected, preventing the deployment of +insecure images. + +> [!NOTE] +> +> The `--provenance=mode=max` and `--sbom=true` flags are required so that +> Docker Scout can trace the DHI base image lineage and correctly apply its +> VEX statements. Enabling the containerd image store via +> `docker/setup-docker-action` allows BuildKit to store attestations locally +> without pushing to a registry first. Without the containerd image store, +> Docker Engine rejects the build with: `Attestation is not supported for the docker driver. +> Switch to a different driver, or turn on the containerd image store, and try again.` +> The `Push image` step runs only if the scan passes, using `if: success()` +> to ensure images are only pushed to the registry when they are free of +> critical or high-severity vulnerabilities. + +For more details on using Docker Scout in CI, see [Integrating Docker +Scout with other systems](/manuals/scout/integrations/_index.md). + +## Grype + +[Grype](https://github.com/anchore/grype) is an open-source scanner that checks +container images against vulnerability databases like the NVD and distro +advisories. + +### Scan a DHI using Grype + +To scan a Docker Hardened Image using Grype with VEX filtering, first export +the VEX attestation and then scan with the `--vex` flag: + +```console +$ docker login dhi.io +$ docker pull dhi.io/: +$ docker scout vex get dhi.io/: --output vex.json +$ grype dhi.io/: --vex vex.json +``` + +The `--vex` flag applies VEX statements during the scan, filtering out known +non-exploitable CVEs for accurate results. + +For more information on exporting VEX attestations, see [Export VEX +attestations](#export-vex-attestations). 
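+
+To gate a CI job on the filtered results, you can combine the same VEX file
+with Grype's `--fail-on` flag, which returns a non-zero exit code when a
+vulnerability at or above the given severity remains after the VEX statements
+are applied. The following is a minimal sketch that uses `dhi.io/python:3.13`
+as an example image:
+
+```console
+$ docker scout vex get dhi.io/python:3.13 --output vex.json
+# Fail the job if any high or critical CVE remains after VEX filtering
+$ grype dhi.io/python:3.13 --vex vex.json --fail-on high
+```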
+ +## Trivy + +[Trivy](https://github.com/aquasecurity/trivy) is an open-source vulnerability +scanner for containers and other artifacts. It detects vulnerabilities in OS +packages and application dependencies. + +### Scan a DHI using Trivy + +After installing Trivy, you can scan a Docker Hardened Image by pulling +the image and running the scan command: + +```console +$ docker login dhi.io +$ docker pull dhi.io/: +$ trivy image --scanners vuln dhi.io/: +``` + +To filter vulnerabilities using VEX statements, Trivy supports multiple +approaches. Docker recommends using VEX Hub, which provides a seamless workflow +for automatically downloading and applying VEX statements from configured +repositories. + +#### Using VEX Hub (recommended) + +Configure Trivy to download the Docker Hardened Images advisories repository +from VEX Hub. Run the following commands to set up the VEX repository: + +```console +$ trivy vex repo init +$ cat << REPO > ~/.trivy/vex/repository.yaml +repositories: + - name: default + url: https://github.com/aquasecurity/vexhub + enabled: true + username: "" + password: "" + token: "" + - name: dhi-vex + url: https://github.com/docker-hardened-images/advisories + enabled: true +REPO +$ trivy vex repo list +$ trivy vex repo download +``` + +After setting up VEX Hub, you can scan a Docker Hardened Image with VEX filtering: + +```console +$ docker login dhi.io +$ docker pull dhi.io/: +$ trivy image --scanners vuln --vex repo dhi.io/: +``` + +For example, scanning the `dhi.io/python:3.13` image: + +```console +$ trivy image --scanners vuln --vex repo dhi.io/python:3.13 +``` + +Example output: + +```plaintext +Report Summary + +┌─────────────────────────────────────────────────────────────────────────────┬────────────┬─────────────────┐ +│ Target │ Type │ Vulnerabilities │ +├─────────────────────────────────────────────────────────────────────────────┼────────────┼─────────────────┤ +│ dhi.io/python:3.13 (debian 13.2) │ debian │ 0 │ +├─────────────────────────────────────────────────────────────────────────────┼────────────┼─────────────────┤ +│ opt/python-3.13.11/lib/python3.13/site-packages/pip-25.3.dist-info/METADATA │ python-pkg │ 0 │ +└─────────────────────────────────────────────────────────────────────────────┴────────────┴─────────────────┘ +Legend: +- '-': Not scanned +- '0': Clean (no security findings detected) +``` + +The `--vex repo` flag applies VEX statements from the configured repository during the scan, +which filters out known non-exploitable CVEs. + +#### Using local VEX files + +In addition to VEX Hub, Trivy also supports the use of local VEX files for +vulnerability filtering. You can download the VEX attestation that Docker +Hardened Images provide and use it directly with Trivy. + +First, download the VEX attestation for your image: + +```console +$ docker scout vex get dhi.io/: --output vex.json +``` + +Then scan the image with the local VEX file: + +```console +$ trivy image --scanners vuln --vex vex.json dhi.io/: +``` + +## Wiz + +[Wiz](https://www.wiz.io/) is a cloud security platform that includes container +image scanning capabilities with support for DHI VEX attestations. Wiz CLI +automatically consumes VEX statements from Docker Hardened Images to provide +accurate vulnerability assessments. 
+ +### Scan a DHI using Wiz CLI + +After acquiring a Wiz subscription and installing the Wiz CLI, you can scan a +Docker Hardened Image by pulling the image and running the scan command: + +```console +$ docker login dhi.io +$ docker pull dhi.io/: +$ wizcli scan container-image dhi.io/: +``` + +## Mend.io + +[Mend.io](https://www.mend.io/) is an application security platform that +includes container image scanning with support for DHI VEX attestations. +Mend Container automatically retrieves and applies VEX statements from Docker +Hardened Images and combines them with Mend's reachability analysis for +comprehensive vulnerability assessment. + +### Scan a DHI using Mend.io + +After acquiring a Mend.io subscription and configuring +[Mend Container](https://docs.mend.io/container/latest/), Mend automatically +detects Docker Hardened Images and applies their VEX data without requiring any +additional configuration. When you scan a Docker Hardened Image through the Mend +AppSec Platform, VEX statements are automatically retrieved and attached as risk +factors to each finding. + +You can view and filter DHI-specific findings in the Mend AppSec Platform under +**Security > Containers > Packages**, where a Docker badge identifies hardened +image packages. Use the **Risk Factors** column to filter by VEX statuses such +as Not Affected, Fixed, or Under Investigation. + +For more information, see the [Mend.io Docker Hardened Images +documentation](https://docs.mend.io/platform/latest/docker-hardened-images). + +## Export VEX attestations + +For scanners that need local VEX files (like Grype or Trivy with local files), +you can export the VEX attestations from Docker Hardened Images. + +> [!NOTE] +> +> By default, VEX attestations are fetched from `registry.scout.docker.com`. Ensure that you can access this registry +> if your network has outbound restrictions. You can also mirror the attestations to an alternate registry. For more +> details, see [Mirror to a third-party registry](mirror.md#mirror-to-a-third-party-registry). + +Export VEX attestations to a JSON file: + +```console +$ docker scout vex get dhi.io/: --output vex.json +``` + +> [!NOTE] +> +> The `docker scout vex get` command requires [Docker Scout +> CLI](https://github.com/docker/scout-cli/) version 1.18.3 or later. +> +> If the image exists locally on your device, you must prefix the image name with `registry://`. For example, use +> `registry://docs/dhi-python:3.13` instead of `docs/dhi-python:3.13`. + diff --git a/content/manuals/dhi/how-to/select-enterprise.md b/content/manuals/dhi/how-to/select-enterprise.md new file mode 100644 index 00000000000..645aff470be --- /dev/null +++ b/content/manuals/dhi/how-to/select-enterprise.md @@ -0,0 +1,263 @@ +--- +title: Get started with DHI Select and Enterprise +linkTitle: Use DHI Select & Enterprise +description: Mirror a repository and start using Docker Hardened Images for Select and Enterprise subscriptions. +keywords: docker hardened images, enterprise, select, mirror, quickstart +--- + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +This guide shows you how to get started with DHI Select and Enterprise +subscriptions. Unlike DHI Community, this workflow lets you mirror repositories +to your organization namespace on Docker Hub, access compliance variants (FIPS), +customize images, and get SLA-backed updates. + +## Prerequisites + +To use this workflow, you need: + +- Organization owner access in your Docker Hub namespace. 
+- One of the following: + - A DHI Select or Enterprise subscription. [Contact Docker + sales](https://www.docker.com/products/hardened-images/#compare) to purchase + or learn more about these subscriptions. + - An active DHI trial. [Start a free DHI + trial](https://hub.docker.com/hardened-images/start-free-trial). +- [Docker Desktop](../../desktop/release-notes.md) 4.65 or later to use the + `docker dhi` CLI. + +Each step, when applicable, shows Docker Hub and command line instructions. You +can use either interface. + +## Step 1: Find an image to use + +{{< tabs group="interface" >}} +{{< tab name="Docker Hub" >}} + +1. Go to [Docker Hub](https://hub.docker.com/) and sign in. +2. Select your organization in the left sidebar. +3. Navigate to **Hardened Images** > **Catalog**. +4. Use the search bar or filters to find an image (for example, `python`, + `node`, or `golang`). For this example, search for `python`. + + To search for an image with a compliance variant (FIPS or STIG), select + **Filter by** and select the relevant compliance option. + +5. Select the Python repository to view its details. + +6. Select **Images** to view available image variants. + +{{< /tab >}} +{{< tab name="Command line" >}} + +1. List available image repositories: + + ```console + $ docker dhi catalog list --type image + ``` + +2. To filter by name and FIPS compliance, use the `--filter` and `--fips` flags: + + ```console + $ docker dhi catalog list --filter python --fips + ``` + +3. Get image details for the repository: + + ```console + $ docker dhi catalog get python + ``` + +{{< /tab >}} +{{< /tabs >}} + +Continue to the next step to mirror the image. To dive deeper into exploring +images see [Search and evaluate Docker Hardened Images](explore.md). + +## Step 2: Mirror the repository + +Mirroring copies a DHI repository into your organization namespace on Docker +Hub. This lets you receive SLA-backed Docker security patches for your images +and use customization as well as compliance variants. Only organization owners +can mirror repositories. + +{{< tabs group="interface" >}} +{{< tab name="Docker Hub" >}} + +1. In the image repository details page you found in the previous step, select + **Use this image** > **Mirror repository**. Note that you must be signed in + to Docker Hub to perform this action. +2. Select **Mirror**. +3. Wait for images to finish mirroring. This can take a few minutes. +4. Verify the mirrored repository appears in your organization namespace with a + `dhi-` prefix (for example, `dhi-python`). + +{{< /tab >}} +{{< tab name="Command line" >}} + +To use the following commands, you must authenticate or configure DHI CLI +authentication using your Docker token. For details, see [Use the DHI +CLI](cli.md#configuration). + +1. Start mirroring the repository to your organization namespace. Replace + `` with your organization name. + + ```console + $ docker dhi mirror start --org \ + -r dhi/python,/dhi-python + ``` + +2. Wait for images to finish mirroring. This can take a few minutes. + +3. Verify the mirrored repository. Replace `` with your organization + name. + + ```console + $ docker dhi mirror list --org + ``` + +{{< /tab >}} +{{< /tabs >}} + +Continue to the next step to customize the image. To dive deeper into mirroring +images see [Mirror a repository](mirror.md). + +## Step 3: Customize the image + +One of the key benefits of DHI Select and Enterprise is the ability to customize +your mirrored images. 
You can add system packages, configure settings, or make other +modifications to meet your organization's specific requirements. + +This example shows how to add the `curl` system package to your mirrored Python image. + +{{< tabs group="interface" >}} +{{< tab name="Docker Hub" >}} + +1. Go to your organization namespace on Docker Hub. +2. Navigate to your mirrored repository (for example, `dhi-python`). +3. Select **Customizations**. +4. Select **Create customization**. +5. Search for `3-alpine3.23` and select any one of the images. +6. In **Add packages**, select **curl**. +7. Select **Next: Configure**. +8. In **Customization name**, enter a name for your customization (for example, `curl`). +9. Select **Next: Review customization**. +10. Select **Create customization** to start the build. + +It can take a few minutes for the customization to build. Go to the +**Customizations** tab of your mirrored repository and view the **Last build** +column to monitor the build status. + +{{< /tab >}} +{{< tab name="Command line" >}} + +To use the following commands, you must authenticate or configure DHI CLI +authentication using your Docker token. For details, see [Use the DHI +CLI](cli.md#configuration). + +1. Create a customization. Replace `` with your organization name. + This creates a file called `my-customization.yaml` with the customization + details. + + ```console + $ docker dhi customization prepare --org python 3-alpine3.23 \ + --destination /dhi-python \ + --name "python with curl" \ + --output my-customization.yaml + ``` + +2. Add the `curl` package to the customization. You can edit the file with any + text or code editor. The following commands use `echo` to add the necessary + lines to the YAML file: + + ```console + $ echo "contents:" >> my-customization.yaml + $ echo " packages:" >> my-customization.yaml + $ echo " - curl" >> my-customization.yaml + ``` + +3. Apply the customization: + + ```console + $ docker dhi customization create --org my-customization.yaml + ``` + +4. Verify the customization was created: + + ```console + $ docker dhi customization list --org + ``` + +It can take a few minutes for the customization to build. To check the build status: + +1. Go to your organization namespace on Docker Hub. +2. Navigate to your mirrored repository (for example, `dhi-python`). +3. Select **Customizations**. +4. View the **Last build** column to monitor the build status. + +{{< /tab >}} +{{< /tabs >}} + +To dive deeper into customization, see [Customize a Docker Hardened +Image](customize.md). + +## Step 4: Pull and run your customized image + +After the customization build completes, you can pull and run the customized +image from your organization namespace on Docker Hub. + +1. Sign in to Docker Hub: + + ```console + $ docker login + ``` + +2. Pull the customized image from your organization. Replace `` with + your organization name. The customized tag includes the suffix based on your + customization name. + + ```console + $ docker pull /dhi-python:3-alpine3.23_python-with-curl + ``` + +3. Run the image and test that `curl` is installed: + + ```console + $ docker run --rm /dhi-python:3-alpine3.23_python-with-curl curl --version + ``` + + This confirms that the `curl` package was successfully added to the image. 
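You can also build on the customized image in your own Dockerfile. The
following minimal sketch assumes the customization created in this guide;
replace `<your-namespace>` with your organization name, and note that `app.py`
is only an illustrative application file:

```dockerfile
# Base image: the customized, mirrored repository in your organization namespace
FROM <your-namespace>/dhi-python:3-alpine3.23_python-with-curl

WORKDIR /app

# Copy your application code into the image
COPY app.py .

ENTRYPOINT ["python", "/app/app.py"]
```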
+ +To dive deeper into using images, see: + +- [Use a Docker Hardened Image](use.md) for general usage +- [Use a Helm chart](helm.md) for deploying with Helm + +## Step 5: Remove customization and stop mirroring + +To remove the customization and stop mirroring the repository: + +1. Go to your organization namespace on Docker Hub. +2. Navigate to your mirrored repository (for example, `dhi-python`). +3. Select **Customizations**. +4. Find the customization you want to delete (for example, `python with curl`). +5. Select the trash can icon. +6. Select **Delete customization** to confirm the deletion. +7. To stop mirroring, go back to your organization's repositories list. +8. Find the mirrored repository (for example, `dhi-python`). +9. Select **Settings**. +10. Select **Stop mirroring**. +11. Select **Stop mirroring** to confirm. + +## What's next + +You've mirrored, customized, and run a Docker Hardened Image. Here are a few ways to keep going: + +- [Migrate existing applications to DHIs](../migration/migrate-with-ai.md): Use + Gordon to update your Dockerfiles to use Docker Hardened Images as the base. + +- [Verify DHIs](verify.md): Use tools like [Docker Scout](/scout/) or Cosign to + inspect and verify signed attestations, like SBOMs and provenance. + +- [Scan DHIs](scan.md): Analyze the image with Docker Scout or other scanners + to identify known CVEs. diff --git a/content/manuals/dhi/how-to/use.md b/content/manuals/dhi/how-to/use.md new file mode 100644 index 00000000000..c618f4d4ab6 --- /dev/null +++ b/content/manuals/dhi/how-to/use.md @@ -0,0 +1,327 @@ +--- +title: Use a Docker Hardened Image +linktitle: Use an image +description: Learn how to pull, run, and reference Docker Hardened Images in Dockerfiles, CI pipelines, and standard development workflows. +keywords: use hardened image, docker pull secure image, non-root containers, multi-stage dockerfile, dev image variant +weight: 30 +aliases: + - /dhi/how-to/els/ + - /dhi/how-to/k8s/ +--- + +You can use a Docker Hardened Image (DHI) just like any other image on Docker +Hub. DHIs follow the same familiar usage patterns. Pull them with `docker pull`, +reference them in your Dockerfile, and run containers with `docker run`. + +The key difference is that DHIs are security-focused and intentionally minimal +to reduce the attack surface. This means some variants don't include a shell or +package manager, and may run as a non-root user by default. + +> [!IMPORTANT] +> +> You must authenticate to the Docker Hardened Images registry (`dhi.io`) to +> pull DHI Community images. You can authenticate using either of the following: +> +> - **Docker ID and password:** Use your Docker Hub username and password. If +> you don't have a Docker account, [create one](../../accounts/create-account.md) +> for free. +> - **Access token:** Use a [personal access token +> (PAT)](../../security/access-tokens.md) for personal accounts, or an +> [organization access token +> (OAT)](../../enterprise/security/access-tokens.md) with your organization +> name as the username. +> +> Run `docker login dhi.io` to authenticate. + +## Considerations when adopting DHIs + +Docker Hardened Images are intentionally minimal to improve security. If you're +updating existing Dockerfiles or frameworks to use DHIs, keep in mind that +runtime images don't include shells or package managers, run as non-root users +by default, and may have different configurations than images you're familiar +with. 
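As a minimal sketch of what these adjustments usually look like, the following
Dockerfile installs dependencies in a `-dev` build stage, copies the result into
a runtime stage with ownership set for the nonroot user (UID 65532), and listens
on an unprivileged port. The image tags and application files are illustrative
only:

```dockerfile
# Build stage: -dev variants include a shell and package manager
FROM dhi.io/python:3.13-dev AS builder

WORKDIR /app
COPY requirements.txt .
# Install dependencies into a self-contained directory
RUN pip install --no-cache-dir --target /app/deps -r requirements.txt
COPY app.py .

# Runtime stage: no shell or package manager, runs as the nonroot user by default
FROM dhi.io/python:3.13

WORKDIR /app
# Make application files readable by the nonroot user (UID 65532)
COPY --from=builder --chown=65532:65532 /app /app

# Make the installed dependencies importable
ENV PYTHONPATH=/app/deps
# Listen on an unprivileged port (1025 or higher)
EXPOSE 8080

ENTRYPOINT ["python", "/app/app.py"]
```

If your build needs extra system packages, install them with `apk` or `apt` in
the `-dev` stage only; the runtime stage has no package manager to install them
with.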
+ +For a comprehensive checklist of migration considerations and detailed guidance, +see [Migrate to Docker Hardened Images](../migration/_index.md). + +## Pull, run, and reference DHIs + +Docker Hardened Images use different image references depending on your +subscription: + +| Subscription | Image reference | Authentication | +|---------------------|----------------------------|-----------------------| +| Community | `dhi.io/:` | `docker login dhi.io` | +| Select & Enterprise | `/:` | `docker login` | + +Select and Enterprise users should [mirror](./mirror.md) repositories to their +Docker Hub organization to access compliance variants and customization +features. + +After authenticating, use the image reference in standard Docker commands and +Dockerfiles. For example: + +```console +$ docker pull dhi.io/python:3.13 +$ docker run --rm dhi.io/python:3.13 python -c "print('Hello from DHI')" +``` + +```dockerfile +FROM dhi.io/python:3.13 +COPY . /app +CMD ["python", "/app/main.py"] +``` + +For multi-stage builds: +- Use a `-dev` tag for build stages that need a shell or package manager. See + [Use dev variants for framework-based + applications](#use-dev-variants-for-framework-based-applications). +- Use the `static` image for compiled executables with minimal runtime + dependencies. See [Use a static image for compiled + executables](#use-a-static-image-for-compiled-executables). + +To learn how to search for available variants, see [Search and evaluate +images](./explore.md). + +## Use a DHI in CI/CD pipelines + +Docker Hardened Images work just like any other image in your CI/CD pipelines. +You can reference them in Dockerfiles, pull them as part of a pipeline step, or +run containers based on them during builds and tests. + +Unlike typical container images, DHIs also include signed +[attestations](../core-concepts/attestations.md) such as SBOMs and provenance +metadata. You can incorporate these into your pipeline to support supply chain +security, policy checks, or audit requirements if your tooling supports it. + +To strengthen your software supply chain, consider adding your own attestations +when building images from DHIs. This lets you document how the image was built, +verify its integrity, and enable downstream validation and policy enforcement +using tools like Docker Scout. + +To learn how to attach attestations during the build process, see [Docker Build +Attestations](/manuals/build/metadata/attestations.md). + +### Discover attestations with ORAS + +You can use [ORAS](https://oras.land/) to discover and inspect the attestations +attached to Docker Hardened Images. This is particularly useful in CI/CD +pipelines for supply chain security validation and compliance checks. + +For automated workflows, authenticate using an [organization access token +(OAT)](../../enterprise/security/access-tokens.md). OATs are owned by the +organization rather than an individual user, making them better suited for CI/CD +pipelines. + +To discover attestations with ORAS: + +1. [Generate an organization access + token](../../enterprise/security/access-tokens.md) with **Read public + repositories** scope. + + The following example shows how to discover attestations on DHI community + images from `dhi.io`. If you're discovering attestations on images mirrored to + your organization, generate an OAT scoped to read from your mirrored repository + instead of **Read public repositories**. + +2. Sign in to `dhi.io` using your organization name as the username and the OAT + as the password. 
+ + > [!WARNING] + > + > The following examples export credentials directly on the command line for + > demonstration purposes. This exposes sensitive tokens in your shell history + > and process list. In production environments, use secure methods such as + > reading from files with restricted permissions, environment files loaded + > at runtime, or secret management tools. + + ```console + $ oras login dhi.io -u + ``` + + Or non-interactively in a CI/CD pipeline, set your organization name and token: + + ```console + $ export DOCKER_ORG="YOUR_ORGANIZATION_NAME" + $ export OAT="YOUR_ORGANIZATION_ACCESS_TOKEN" + $ echo $OAT | oras login dhi.io -u "$DOCKER_ORG" --password-stdin + ``` + +3. Discover attestations on a DHI image: + + ```console + $ oras discover dhi.io/node:24-dev --platform linux/amd64 + ``` + + > [!NOTE] + > + > The `--platform` flag is required. Without it, `oras discover` resolves to + > the multi-arch image index, which returns only an index-level signature + > rather than the full set of per-platform attestations. + + A successful response lists the attestations attached to the image, + including SBOMs, provenance, vulnerability reports, and changelog metadata. + +## Use a static image for compiled executables + +Docker Hardened Images include a `static` image repository designed specifically +for running compiled executables in an extremely minimal and secure runtime. +Unlike a non-hardened `FROM scratch` image, the DHI `static` image includes +attestations and essential packages like `ca-certificates`. + +Use a `-dev` or other builder image to compile your binary, then copy the output +into a `static` image: + +```dockerfile +FROM dhi.io/golang:1.22-dev AS build +WORKDIR /app +COPY . . +RUN CGO_ENABLED=0 go build -o myapp + +FROM dhi.io/static:20230311 +COPY --from=build /app/myapp /myapp +ENTRYPOINT ["/myapp"] +``` + +For more multi-stage build patterns, see the [Go migration +example](../migration/examples/go.md). + +## Use dev variants for framework-based applications + +If you're building applications with frameworks that require package managers or +build tools (such as Python, Node.js, or Go), use a `-dev` variant during the +development or build stage. These variants include essential utilities like +shells, compilers, and package managers to support local iteration and CI +workflows. + +Use `-dev` images in your inner development loop or in isolated CI stages to +maximize productivity. Once you're ready to produce artifacts for production, +switch to a smaller runtime variant to reduce the attack surface and image size. + +For detailed multi-stage Dockerfile examples using dev variants, see the +migration examples: +- [Go](../migration/examples/go.md) +- [Python](../migration/examples/python.md) +- [Node.js](../migration/examples/node.md) + +## Use compliance and ELS variants + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +With a DHI Select or DHI Enterprise subscription, you can access additional +image variants: + +- Compliance variants: FIPS-enabled and STIG-ready images for regulatory + requirements +- ELS (Extended Lifecycle Support) variants (requires add-on): Security patches + for end-of-life image versions + +To access these variants, [mirror](./mirror.md) the repository to your Docker +Hub organization. For ELS, enable **Mirror end-of-life images** when setting up +mirroring. Once mirrored, use the compliance or EOL tags like any other image +tag. 
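For example, after mirroring, you pull these variants from your organization
namespace on Docker Hub just like any other tag. The names below are
placeholders; check your mirrored repository's tag list for the exact compliance
or end-of-life tag:

```console
$ docker login
$ docker pull <your-namespace>/dhi-<repository>:<compliance-or-els-tag>
```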
+ +## Use with Kubernetes + +When deploying Docker Hardened Images to Kubernetes, the process is similar to +using any other container image with one key difference: you must configure +image pull secrets to authenticate to the DHI registry. This applies whether +you're pulling directly from `dhi.io`, from a mirror on Docker Hub, or from +your own third-party registry. + +### Create an image pull secret + +You can create an image pull secret using either an access token or Docker Desktop credentials. + +For the `--docker-server` value: +- Use `dhi.io` for community images pulled directly from Docker Hardened Images +- Use `docker.io` for mirrored repositories on Docker Hub +- Use your registry's hostname for third-party registries + +#### Using an access token + +Create a secret using a [Personal Access Token +(PAT)](../../security/access-tokens.md) or [Organization Access Token +(OAT)](../../enterprise/security/access-tokens.md). Ensure the token has at +least read-only access to the repositories. + +```console +$ kubectl create -n secret docker-registry --docker-server= \ + --docker-username= --docker-password= \ + --docker-email= +``` + +#### Using Docker Desktop credentials + +If you're already authenticated with Docker Desktop, you can create a secret +using your stored credentials. This method works for registries you've +authenticated to via Docker Desktop (using `docker login `). + +```console +$ NS= +$ kubectl create -n ${NS} secret docker-registry dhi-pull-secret \ + --docker-server= \ + --docker-username= \ + --docker-password="$(echo https:// | docker-credential-desktop get | jq -r .Secret)" \ + --docker-email= +``` + +This method extracts credentials from Docker Desktop's credential store, avoiding the need to create a separate access token for local development. + +### Test the image pull secret + +After creating the secret, verify it works by deploying a test pod that +references the secret in its `imagePullSecrets` configuration. + +Create a test pod: + +```console +kubectl apply --wait -f - < +spec: + containers: + - name: test + image: bash:5 + command: [ "sh", "-c", "echo 'Hello from DHI in Kubernetes!'" ] + imagePullSecrets: + - name: +EOF +``` + +Check the pod status to ensure it completed successfully: + +```console +$ kubectl get -n pods/dhi-test +``` + +A successful test shows `Completed` status: + +```console +NAME READY STATUS RESTARTS AGE +dhi-test 0/1 Completed ... ... +``` + +If you see `ErrImagePull` status instead, there's an issue with your secret +configuration: + +```console +NAME READY STATUS RESTARTS AGE +dhi-test 0/1 ErrImagePull 0 ... +``` + +Verify the pod output matches the expected message: + +```console +$ kubectl logs -n pods/dhi-test +Hello from DHI in Kubernetes! +``` + +Clean up the test pod: + +```console +$ kubectl delete -n pods/dhi-test +``` diff --git a/content/manuals/dhi/how-to/verify.md b/content/manuals/dhi/how-to/verify.md new file mode 100644 index 00000000000..1d4e94ae99f --- /dev/null +++ b/content/manuals/dhi/how-to/verify.md @@ -0,0 +1,403 @@ +--- +title: Verify a Docker Hardened Image or chart +linktitle: Verify an image or chart +description: Use Docker Scout or cosign to verify signed attestations like SBOMs, provenance, and vulnerability data for Docker Hardened Images and charts. 
+weight: 40 +keywords: verify container image, docker scout attest, cosign verify, sbom validation, signed container attestations, helm chart verification +--- + +Docker Hardened Images (DHI) and charts include signed attestations that verify +the build process, contents, and security posture. + +Docker's public key for DHI images and charts is published at: + +- https://registry.scout.docker.com/keyring/dhi/latest.pub +- https://github.com/docker-hardened-images/keyring + +Docker recommends using [Docker Scout](/scout/), but you can use +[`regctl`](https://github.com/regclient/regclient) and +[`cosign`](https://docs.sigstore.dev/) to retrieve and verify attestations. +Docker Scout offers several key advantages: it understands DHI attestation +structures, automatically resolves platforms, provides human-readable summaries, +validates in one step with `--verify`, and integrates tightly with Docker's +attestation infrastructure. + +> [!IMPORTANT] +> +> You must authenticate to the Docker Hardened Images registry (`dhi.io`) to +> pull images. Use your Docker ID credentials (the same username and password +> you use for Docker Hub) when signing in. If you don't have a Docker account, +> [create one](../../accounts/create-account.md) for free. +> +> Run `docker login dhi.io` to authenticate. + +## Verify image attestations + +> [!NOTE] +> +> Before you run `docker scout attest` commands, ensure any image that you have +> pulled locally is up to date with the remote image. You can do this by running +> `docker pull`. If you don't do this, you may see `No attestation found`. + +### List available attestations + +To list attestations for a mirrored DHI image: + +{{< tabs group="tool" >}} +{{< tab name="Docker Scout" >}} + +> [!NOTE] +> +> If the image exists locally on your device, you must prefix the image name with `registry://`. For example, use +> `registry://dhi.io/python:3.13` instead of `dhi.io/python:3.13`. + +```console +$ docker scout attest list dhi.io/: +``` + +This command shows all available attestations, including SBOMs, provenance, vulnerability reports, and more. + +{{< /tab >}} +{{< tab name="regctl" >}} + +First, authenticate to both registries. Prepare a [personal access token +(PAT)](../../security/access-tokens.md) for your user with `read only` access: + +> [!WARNING] +> +> The following examples export credentials directly on the command line for +> demonstration purposes. This exposes sensitive tokens in your shell history +> and process list. In production environments, use secure methods such as +> reading from files with restricted permissions, environment files loaded +> at runtime, or secret management tools. + +```console +$ export DOCKER_USERNAME="YOUR_DOCKER_USERNAME" +$ export DOCKER_PAT="YOUR_DOCKER_PAT" +$ export DOCKER_ORG="YOUR_DOCKER_ORG" +$ echo $DOCKER_PAT | regctl registry login -u "$DOCKER_USERNAME" --pass-stdin docker.io +$ echo $DOCKER_PAT | regctl registry login -u "$DOCKER_USERNAME" --pass-stdin registry.scout.docker.com +``` + +Then list attestations using the `--external` flag. 
DHI repositories store image +layers on `dhi.io` (or `docker.io` for mirrored images) and signed attestations +in `registry.scout.docker.com`: + +```console +$ regctl artifact list docker.io/${DOCKER_ORG}/: \ + --external registry.scout.docker.com/${DOCKER_ORG}/ \ + --platform linux/amd64 +``` + +For example: + +```console +$ regctl artifact list docker.io/${DOCKER_ORG}/dhi-node:22 \ + --external registry.scout.docker.com/${DOCKER_ORG}/dhi-node \ + --platform linux/amd64 +``` + + +{{< /tab >}} +{{< /tabs >}} + +### Retrieve a specific attestation + +{{< tabs group="tool" >}} +{{< tab name="Docker Scout" >}} + +To retrieve a specific attestation, use the `--predicate-type` flag with the full predicate type URI: + +```console +$ docker scout attest get \ + --predicate-type https://cyclonedx.org/bom/v1.6 \ + dhi.io/: +``` + +> [!NOTE] +> +> If the image exists locally on your device, you must prefix the image name with `registry://`. For example, use +> `registry://dhi.io/python:3.13` instead of `dhi.io/python:3.13`. + +For example: + +```console +$ docker scout attest get \ + --predicate-type https://cyclonedx.org/bom/v1.6 \ + dhi.io/python:3.13 +``` + +To retrieve only the predicate body: + +```console +$ docker scout attest get \ + --predicate-type https://cyclonedx.org/bom/v1.6 \ + --predicate \ + dhi.io/: +``` + +{{< /tab >}} +{{< tab name="regctl" >}} + +Once you've listed attestations, download the full attestation artifact using the digest from the `Name` field: + +```console +$ regctl artifact get > attestation.json +``` + +For example, to save a SLSA provenance attestation: + +```console +$ regctl artifact get registry.scout.docker.com/${DOCKER_ORG}/dhi-node@sha256:6cbf803796e281e535f2681de7cd33a1012202610322a50ee745d1bb02ac3c18 > slsa_provenance.json +``` + +{{< /tab >}} +{{< /tabs >}} + +### Validate the attestation + +{{< tabs >}} +{{< tab name="Docker Scout" >}} + +To validate the attestation using Docker Scout, you can use the `--verify` flag: + +```console +$ docker scout attest get dhi.io/: \ + --predicate-type https://scout.docker.com/sbom/v0.1 --verify +``` + +> [!NOTE] +> +> If the image exists locally on your device, you must prefix the image name +> with `registry://`. For example, use `registry://dhi.io/node:20.19-debian12` +> instead of `dhi.io/node:20.19-debian12`. + + +For example, to verify the SBOM attestation for the `dhi.io/node:20.19-debian12` image: + +```console +$ docker scout attest get dhi.io/node:20.19-debian12 \ + --predicate-type https://scout.docker.com/sbom/v0.1 --verify +``` + +{{< /tab >}} +{{< tab name="cosign" >}} + +Once you've listed the attestations and obtained the digest from the `Name` field, verify them using cosign: + +```console +$ cosign verify \ + \ + --key https://registry.scout.docker.com/keyring/dhi/latest.pub \ + --insecure-ignore-tlog=true +``` + +For example: + +```console +$ cosign verify \ + registry.scout.docker.com/${DOCKER_ORG}/dhi-node@sha256:6cbf803796e281e535f2681de7cd33a1012202610322a50ee745d1bb02ac3c18 \ + --key https://registry.scout.docker.com/keyring/dhi/latest.pub \ + --insecure-ignore-tlog=true +``` + +> [!NOTE] +> +> The `--insecure-ignore-tlog=true` flag is needed because DHI attestations +> may not be recorded in the public Rekor transparency log to protect private +> customer information. The attestation signature is still verified against +> Docker's public key. 
+ +{{< /tab >}} +{{< /tabs >}} + +#### Handle missing transparency log entries + +When using `--verify` with Docker Scout or `cosign verify`, you may sometimes +see an error like: + +```text +ERROR no matching signatures: signature not found in transparency log +``` + +This occurs because Docker Hardened Images don't always record attestations in +the public [Rekor](https://docs.sigstore.dev/logging/overview/) transparency +log. In cases where an attestation would contain private user information (for +example, your organization's namespace in the image reference), writing it to +Rekor would expose that information publicly. + +Even if the Rekor entry is missing, the attestation is still signed with +Docker's public key and can be verified offline by skipping the Rekor +transparency log check. + +To skip the transparency log check and validate against Docker's key, use the +`--skip-tlog` flag: + +```console +$ docker scout attest get \ + --predicate-type https://cyclonedx.org/bom/v1.6 \ + dhi.io/: \ + --verify --skip-tlog +``` + +> [!NOTE] +> +> The `--skip-tlog` flag is only available in Docker Scout CLI version 1.18.2 and +> later. +> +> If the image exists locally on your device, you must prefix the image name with `registry://`. For example, use +> `registry://dhi.io/python:3.13` instead of `dhi.io/python:3.13`. + + +This is equivalent to using `cosign` with the `--insecure-ignore-tlog=true` +flag, which validates the signature against Docker's published public key, but +ignores the transparency log check. + +### Show the equivalent cosign command + +When using the `--verify` flag, it also prints the corresponding +[cosign](https://docs.sigstore.dev/) command to verify the image signature: + +```console +$ docker scout attest get \ + --predicate-type https://cyclonedx.org/bom/v1.6 \ + --verify \ + dhi.io/: +``` + +> [!NOTE] +> +> If the image exists locally on your device, you must prefix the image name with `registry://`. For example, use +> `registry://dhi.io/python:3.13` instead of `dhi.io/python:3.13`. + +For example: + +```console +$ docker scout attest get \ + --predicate-type https://cyclonedx.org/bom/v1.6 \ + --verify \ + dhi.io/python:3.13 +``` + +If verification succeeds, Docker Scout prints the full `cosign verify` command. + +Example output: + +```console + v SBOM obtained from attestation, 101 packages found + v Provenance obtained from attestation + v cosign verify ... +``` + +> [!IMPORTANT] +> +> When using cosign, you must first authenticate to both the DHI registry +> and the Docker Scout registry. +> +> For example: +> +> ```console +> $ docker login dhi.io +> $ docker login registry.scout.docker.com +> $ cosign verify ... +> ``` + +## Verify package attestations + +In addition to image attestations, individual hardened packages have their own +attestations. These package-level attestations allow you to verify the +provenance and build information for specific packages within an image. + +For instructions on how to extract package information from image attestations +and retrieve package-level attestations, see [Package +attestations](./hardened-packages.md#package-attestations). + +## Verify Helm chart attestations with Docker Scout + +Docker Hardened Image Helm charts include the same comprehensive attestations +as container images. The verification process for charts is identical to that +for images, using the same Docker Scout CLI commands. 
+ +### List available chart attestations + +To list attestations for a DHI Helm chart: + +```console +$ docker scout attest list dhi.io/: +``` + +For example, to list attestations for the external-dns chart: + +```console +$ docker scout attest list dhi.io/external-dns-chart:1.20.0 +``` + +This command shows all available chart attestations, including SBOMs, provenance, vulnerability reports, and more. + +### Retrieve a specific chart attestation + +To retrieve a specific attestation from a Helm chart, use the `--predicate-type` flag with the full predicate type URI: + +```console +$ docker scout attest get \ + --predicate-type https://cyclonedx.org/bom/v1.6 \ + dhi.io/: +``` + +For example: + +```console +$ docker scout attest get \ + --predicate-type https://cyclonedx.org/bom/v1.6 \ + dhi.io/external-dns-chart:1.20.0 +``` + +To retrieve only the predicate body: + +```console +$ docker scout attest get \ + --predicate-type https://cyclonedx.org/bom/v1.6 \ + --predicate \ + dhi.io/: +``` + +### Validate chart attestations with Docker Scout + +To validate a chart attestation using Docker Scout, use the `--verify` flag: + +```console +$ docker scout attest get dhi.io/: \ + --predicate-type https://scout.docker.com/sbom/v0.1 --verify +``` + +For example, to verify the SBOM attestation for the external-dns chart: + +```console +$ docker scout attest get dhi.io/external-dns-chart:1.20.0 \ + --predicate-type https://scout.docker.com/sbom/v0.1 --verify +``` + +The same `--skip-tlog` flag described in [Handle missing transparency log +entries](#handle-missing-transparency-log-entries) can also be used with chart +attestations when needed. + +## Available DHI attestations + +See [available +attestations](../core-concepts/attestations.md#image-attestations) for a list +of attestations available for each DHI image and [Helm chart +attestations](../core-concepts/attestations.md#helm-chart-attestations) for a +list of attestations available for each DHI chart. + +## Explore attestations on Docker Hub + +You can also browse attestations visually when [exploring an image +variant](./explore.md#image-variant-details). The **Attestations** section +lists each available attestation with its: + +- Type (e.g. SBOM, VEX) +- Predicate type URI +- Digest reference for use with `cosign` + +These attestations are generated and signed automatically as part of the Docker +Hardened Image or chart build process. \ No newline at end of file diff --git a/content/manuals/dhi/migration/_index.md b/content/manuals/dhi/migration/_index.md new file mode 100644 index 00000000000..fb7aa54975c --- /dev/null +++ b/content/manuals/dhi/migration/_index.md @@ -0,0 +1,53 @@ +--- +title: Migration +description: Learn how to migrate your existing applications to Docker Hardened Images +weight: 18 +keywords: migrate, docker hardened images, dhi, migration guide +aliases: + - /dhi/how-to/migrate/ +params: + grid_migration_paths: + - title: Migrate with Gordon + description: Use Gordon to automatically migrate your Dockerfile to Docker Hardened Images with guidance and recommendations. + icon: smart_toy + link: /dhi/migration/migrate-with-ai/ + - title: Migrate from Alpine or Debian images + description: Manual migration guide for moving from Docker Official Images (Alpine or Debian-based) to Docker Hardened Images. + icon: code + link: /dhi/migration/migrate-from-doi/ + - title: Migrate from Ubuntu + description: Manual migration guide for transitioning from Ubuntu-based images to Docker Hardened Images. 
+ icon: upgrade + link: /dhi/migration/migrate-from-ubuntu/ + - title: Migrate from Wolfi + description: Manual migration guide for transitioning from Wolfi-based images to Docker Hardened Images. + icon: transform + link: /dhi/migration/migrate-from-wolfi/ + + grid_migration_resources: + - title: Migration checklist + description: A comprehensive checklist of migration considerations to ensure successful transition to Docker Hardened Images. + icon: checklist + link: /dhi/migration/checklist/ + - title: Examples + description: Example Dockerfile migrations for different programming languages and frameworks to guide your migration process. + icon: preview + link: /dhi/migration/examples/ +--- + +This section provides guidance for migrating your applications to Docker +Hardened Images (DHI). Migrating to DHI enhances the security posture of your +containerized applications by leveraging hardened base images with built-in +security features. + +## Migration paths + +Choose the migration approach that best fits your needs: + +{{< grid items="grid_migration_paths" >}} + +## Resources + +{{< grid items="grid_migration_resources" >}} + + diff --git a/content/manuals/dhi/migration/checklist.md b/content/manuals/dhi/migration/checklist.md new file mode 100644 index 00000000000..fc6034356f5 --- /dev/null +++ b/content/manuals/dhi/migration/checklist.md @@ -0,0 +1,21 @@ +--- +title: Migration checklist +description: A checklist of considerations when migrating to Docker Hardened Images +weight: 10 +keywords: migration checklist, dhi, docker hardened images +--- + +Use this checklist to ensure you address key considerations when migrating to Docker Hardened Images. + +## Migration considerations + +| Item | Action required | +|:-------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Base image | Update your Dockerfile `FROM` statements to reference a Docker Hardened Image instead of your current base image. | +| Package management | Install packages only in `dev`-tagged images during build stages. Use `apk` for Alpine-based images or `apt` for Debian-based images. Copy the necessary artifacts to your runtime stage, as runtime images don't include package managers. | +| Non-root user | Verify that all files and directories your application needs are readable and writable by the nonroot user (UID 65532), as runtime images run as nonroot by default. | +| Multi-stage build | Use `dev` or `sdk`-tagged images for build stages where you need build tools and package managers. Use non-dev images for your final runtime stage. | +| TLS certificates | Remove any steps that install ca-certificates, as DHIs include ca-certificates by default. | +| Ports | Configure your application to listen on port 1025 or higher inside the container, as the nonroot user can't bind to privileged ports (below 1024) in Kubernetes or Docker Engine versions older than 20.10. | +| Entry point | Check the entry point of your chosen DHI using `docker inspect` or the image documentation. Update your Dockerfile's `ENTRYPOINT` or `CMD` instructions if your application relies on a different entry point. | +| No shell | Move any shell commands (`RUN`, `SHELL`) to build stages using `dev`-tagged images. 
Runtime images don't include a shell, so copy all necessary artifacts from the build stage. | diff --git a/content/manuals/dhi/migration/examples/_index.md b/content/manuals/dhi/migration/examples/_index.md new file mode 100644 index 00000000000..8044b2844e3 --- /dev/null +++ b/content/manuals/dhi/migration/examples/_index.md @@ -0,0 +1,32 @@ +--- +title: Migration examples +description: Real-world examples of migrating to Docker Hardened Images +weight: 40 +keywords: migration examples, dhi, docker hardened images +params: + grid_examples: + - title: Go + description: Learn how to migrate Go applications to Docker Hardened Images with practical examples and best practices. + icon: code + link: /dhi/migration/examples/go/ + - title: Python + description: Learn how to migrate Python applications to Docker Hardened Images with practical examples and best practices. + icon: code + link: /dhi/migration/examples/python/ + - title: Node.js + description: Learn how to migrate Node.js applications to Docker Hardened Images with practical examples and best practices. + icon: code + link: /dhi/migration/examples/node/ +--- + +This section provides detailed migration examples for common programming languages and frameworks. + +## Available examples + +{{< grid items="grid_examples" >}} + +In addition to this documentation, each Docker Hardened Image repository in +the [Docker Hardened Images +catalog](https://hub.docker.com/hardened-images/catalog) includes image-specific +guidance and best practices for migrating applications built with that language +or framework. \ No newline at end of file diff --git a/content/manuals/dhi/migration/examples/go.md b/content/manuals/dhi/migration/examples/go.md new file mode 100644 index 00000000000..259a4361119 --- /dev/null +++ b/content/manuals/dhi/migration/examples/go.md @@ -0,0 +1,134 @@ +--- +title: Go +description: Migrate a Go application to Docker Hardened Images +weight: 10 +keywords: go, golang, migration, dhi +--- + +This example shows how to migrate a Go application to Docker Hardened Images. + +The following examples show Dockerfiles before and after migration to Docker +Hardened Images. Each example includes five variations: + +- Before (Ubuntu): A sample Dockerfile using Ubuntu-based images, before migrating to DHI +- Before (Wolfi): A sample Dockerfile using Wolfi distribution images, before migrating to DHI +- Before (DOI): A sample Dockerfile using Docker Official Images, before migrating to DHI +- After (multi-stage): A sample Dockerfile after migrating to DHI with multi-stage builds (recommended for minimal, secure images) +- After (single-stage): A sample Dockerfile after migrating to DHI with single-stage builds (simpler but results in a larger image with a broader attack surface) + +> [!NOTE] +> +> Multi-stage builds are recommended for most use cases. Single-stage builds are +> supported for simplicity, but come with tradeoffs in size and security. +> +> You must authenticate to `dhi.io` before you can pull Docker Hardened Images. +> Use your Docker ID credentials (the same username and password you use for +> Docker Hub). If you don't have a Docker account, [create +> one](../../../accounts/create-account.md) for free. +> +> Run `docker login dhi.io` to authenticate. + +{{< tabs >}} +{{< tab name="Before (Ubuntu)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +FROM ubuntu/go:1.22-24.04 + +WORKDIR /app +ADD . 
./ + +# Install any additional packages if needed using apt +# RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/* + +RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags="-s -w" --installsuffix cgo -o main . + +ENTRYPOINT ["/app/main"] +``` + +{{< /tab >}} +{{< tab name="Before (Wolfi)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +FROM cgr.dev/chainguard/go:latest-dev + +WORKDIR /app +ADD . ./ + +# Install any additional packages if needed using apk +# RUN apk add --no-cache git + +RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags="-s -w" --installsuffix cgo -o main . + +ENTRYPOINT ["/app/main"] +``` + +{{< /tab >}} +{{< tab name="Before (DOI)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +FROM golang:latest + +WORKDIR /app +ADD . ./ + +# Install any additional packages if needed using apt +# RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/* + +RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags="-s -w" --installsuffix cgo -o main . + +ENTRYPOINT ["/app/main"] +``` + +{{< /tab >}} +{{< tab name="After (multi-stage)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +# === Build stage: Compile Go application === +FROM dhi.io/golang:1-alpine3.21-dev AS builder + +WORKDIR /app +ADD . ./ + +# Install any additional packages if needed using apk +# RUN apk add --no-cache git + +RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags="-s -w" --installsuffix cgo -o main . + +# === Final stage: Create minimal runtime image === +FROM dhi.io/golang:1-alpine3.21 + +WORKDIR /app +COPY --from=builder /app/main /app/main + +ENTRYPOINT ["/app/main"] +``` + +{{< /tab >}} +{{< tab name="After (single-stage)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +FROM dhi.io/golang:1-alpine3.21-dev + +WORKDIR /app +ADD . ./ + +# Install any additional packages if needed using apk +# RUN apk add --no-cache git + +RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags="-s -w" --installsuffix cgo -o main . + +ENTRYPOINT ["/app/main"] +``` + +{{< /tab >}} +{{< /tabs >}} diff --git a/content/manuals/dhi/migration/examples/node.md b/content/manuals/dhi/migration/examples/node.md new file mode 100644 index 00000000000..07ddd36b68e --- /dev/null +++ b/content/manuals/dhi/migration/examples/node.md @@ -0,0 +1,143 @@ +--- +title: Node.js +description: Migrate a Node.js application to Docker Hardened Images +weight: 30 +keywords: nodejs, node, migration, dhi +--- + +This example shows how to migrate a Node.js application to Docker Hardened Images. + +The following examples show Dockerfiles before and after migration to Docker +Hardened Images. Each example includes five variations: + +- Before (Ubuntu): A sample Dockerfile using Ubuntu-based images, before migrating to DHI +- Before (Wolfi): A sample Dockerfile using Wolfi distribution images, before migrating to DHI +- Before (DOI): A sample Dockerfile using Docker Official Images, before migrating to DHI +- After (multi-stage): A sample Dockerfile after migrating to DHI with multi-stage builds (recommended for minimal, secure images) +- After (single-stage): A sample Dockerfile after migrating to DHI with single-stage builds (simpler but results in a larger image with a broader attack surface) + +> [!NOTE] +> +> Multi-stage builds are recommended for most use cases. Single-stage builds are +> supported for simplicity, but come with tradeoffs in size and security. +> +> You must authenticate to `dhi.io` before you can pull Docker Hardened Images. 
+> Use your Docker ID credentials (the same username and password you use for +> Docker Hub). If you don't have a Docker account, [create +> one](../../../accounts/create-account.md) for free. +> +> Run `docker login dhi.io` to authenticate. + +{{< tabs >}} +{{< tab name="Before (Ubuntu)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +FROM ubuntu/node:18-24.04_edge +WORKDIR /usr/src/app + +COPY package*.json ./ + +RUN npm install + +COPY . . + +CMD ["node", "index.js"] +``` + +{{< /tab >}} +{{< tab name="Before (Wolfi)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +FROM cgr.dev/chainguard/node:latest-dev +WORKDIR /usr/src/app + +COPY package*.json ./ + +# Install any additional packages if needed using apk +# RUN apk add --no-cache python3 make g++ + +RUN npm install + +COPY . . + +CMD ["node", "index.js"] +``` + +{{< /tab >}} +{{< tab name="Before (DOI)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +FROM node:latest +WORKDIR /usr/src/app + +COPY package*.json ./ + +# Install any additional packages if needed using apt +# RUN apt-get update && apt-get install -y python3 make g++ && rm -rf /var/lib/apt/lists/* + +RUN npm install + +COPY . . + +CMD ["node", "index.js"] +``` + +{{< /tab >}} +{{< tab name="After (multi-stage)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +# === Build stage: Install dependencies and build application === +FROM dhi.io/node:23-alpine3.21-dev AS builder +WORKDIR /usr/src/app + +COPY package*.json ./ + +# Install any additional packages if needed using apk +# RUN apk add --no-cache python3 make g++ + +RUN npm install + +COPY . . + +# === Final stage: Create minimal runtime image === +FROM dhi.io/node:23-alpine3.21 +ENV PATH=/app/node_modules/.bin:$PATH + +COPY --from=builder --chown=node:node /usr/src/app /app + +WORKDIR /app + +CMD ["index.js"] +``` + +{{< /tab >}} +{{< tab name="After (single-stage)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +FROM dhi.io/node:23-alpine3.21-dev +WORKDIR /usr/src/app + +COPY package*.json ./ + +# Install any additional packages if needed using apk +# RUN apk add --no-cache python3 make g++ + +RUN npm install + +COPY . . + +CMD ["node", "index.js"] +``` + +{{< /tab >}} +{{< /tabs >}} diff --git a/content/manuals/dhi/migration/examples/python.md b/content/manuals/dhi/migration/examples/python.md new file mode 100644 index 00000000000..a844acc5dbc --- /dev/null +++ b/content/manuals/dhi/migration/examples/python.md @@ -0,0 +1,203 @@ +--- +title: Python +description: Migrate a Python application to Docker Hardened Images +weight: 20 +keywords: python, migration, dhi +--- + +This example shows how to migrate a Python application to Docker Hardened Images. + +The following examples show Dockerfiles before and after migration to Docker +Hardened Images. Each example includes five variations: + +- Before (Ubuntu): A sample Dockerfile using Ubuntu-based images, before migrating to DHI +- Before (Wolfi): A sample Dockerfile using Wolfi distribution images, before migrating to DHI +- Before (DOI): A sample Dockerfile using Docker Official Images, before migrating to DHI +- After (multi-stage): A sample Dockerfile after migrating to DHI with multi-stage builds (recommended for minimal, secure images) +- After (single-stage): A sample Dockerfile after migrating to DHI with single-stage builds (simpler but results in a larger image with a broader attack surface) + +> [!NOTE] +> +> Multi-stage builds are recommended for most use cases. 
Single-stage builds are +> supported for simplicity, but come with tradeoffs in size and security. +> +> You must authenticate to `dhi.io` before you can pull Docker Hardened Images. +> Use your Docker ID credentials (the same username and password you use for +> Docker Hub). If you don't have a Docker account, [create +> one](../../../accounts/create-account.md) for free. +> +> Run `docker login dhi.io` to authenticate. + +{{< tabs >}} +{{< tab name="Before (Ubuntu)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +FROM ubuntu/python:3.13-24.04_stable AS builder + +ENV LANG=C.UTF-8 +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 +ENV PATH="/app/venv/bin:$PATH" + +WORKDIR /app + +RUN python -m venv /app/venv +COPY requirements.txt . + +RUN pip install --no-cache-dir -r requirements.txt + +FROM ubuntu/python:3.13-24.04_stable + +WORKDIR /app + +ENV PYTHONUNBUFFERED=1 +ENV PATH="/app/venv/bin:$PATH" + +COPY app.py ./ +COPY --from=builder /app/venv /app/venv + +ENTRYPOINT [ "python", "/app/app.py" ] +``` + +{{< /tab >}} +{{< tab name="Before (Wolfi)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +FROM cgr.dev/chainguard/python:latest-dev AS builder + +ENV LANG=C.UTF-8 +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 +ENV PATH="/app/venv/bin:$PATH" + +WORKDIR /app + +RUN python -m venv /app/venv +COPY requirements.txt . + +# Install any additional packages if needed using apk +# RUN apk add --no-cache gcc musl-dev + +RUN pip install --no-cache-dir -r requirements.txt + +FROM cgr.dev/chainguard/python:latest + +WORKDIR /app + +ENV PYTHONUNBUFFERED=1 +ENV PATH="/app/venv/bin:$PATH" + +COPY app.py ./ +COPY --from=builder /app/venv /app/venv + +ENTRYPOINT [ "python", "/app/app.py" ] +``` + +{{< /tab >}} +{{< tab name="Before (DOI)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +FROM python:latest AS builder + +ENV LANG=C.UTF-8 +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 +ENV PATH="/app/venv/bin:$PATH" + +WORKDIR /app + +RUN python -m venv /app/venv +COPY requirements.txt . + +# Install any additional packages if needed using apt +# RUN apt-get update && apt-get install -y gcc && rm -rf /var/lib/apt/lists/* + +RUN pip install --no-cache-dir -r requirements.txt + +FROM python:latest + +WORKDIR /app + +ENV PYTHONUNBUFFERED=1 +ENV PATH="/app/venv/bin:$PATH" + +COPY app.py ./ +COPY --from=builder /app/venv /app/venv + +ENTRYPOINT [ "python", "/app/app.py" ] +``` + +{{< /tab >}} +{{< tab name="After (multi-stage)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +# === Build stage: Install dependencies and create virtual environment === +FROM dhi.io/python:3.13-alpine3.21-dev AS builder + +ENV LANG=C.UTF-8 +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 +ENV PATH="/app/venv/bin:$PATH" + +WORKDIR /app + +RUN python -m venv /app/venv +COPY requirements.txt . 
+ +# Install any additional packages if needed using apk +# RUN apk add --no-cache gcc musl-dev + +RUN pip install --no-cache-dir -r requirements.txt + +# === Final stage: Create minimal runtime image === +FROM dhi.io/python:3.13-alpine3.21 + +WORKDIR /app + +ENV PYTHONUNBUFFERED=1 +ENV PATH="/app/venv/bin:$PATH" + +COPY app.py ./ +COPY --from=builder /app/venv /app/venv + +ENTRYPOINT [ "python", "/app/app.py" ] +``` + +{{< /tab >}} +{{< tab name="After (single-stage)" >}} + +```dockerfile +#syntax=docker/dockerfile:1 + +FROM dhi.io/python:3.13-alpine3.21-dev + +ENV LANG=C.UTF-8 +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 +ENV PATH="/app/venv/bin:$PATH" + +WORKDIR /app + +RUN python -m venv /app/venv +COPY requirements.txt . + +# Install any additional packages if needed using apk +# RUN apk add --no-cache gcc musl-dev + +RUN pip install --no-cache-dir -r requirements.txt + +COPY app.py ./ + +ENTRYPOINT [ "python", "/app/app.py" ] +``` + +{{< /tab >}} +{{< /tabs >}} diff --git a/content/manuals/dhi/migration/migrate-from-doi.md b/content/manuals/dhi/migration/migrate-from-doi.md new file mode 100644 index 00000000000..521733545f8 --- /dev/null +++ b/content/manuals/dhi/migration/migrate-from-doi.md @@ -0,0 +1,114 @@ +--- +title: Migrate from Alpine or Debian +description: Step-by-step guide to migrate from Docker Official Images to Docker Hardened Images +weight: 20 +keywords: docker official images, doi, migration, dhi, alpine, debian +--- + +Docker Hardened Images (DHI) come in both [Alpine-based and Debian-based +variants](../explore/available.md). In many cases, migrating from another image +based on these distributions is as simple as changing the base image in your +Dockerfile. + +This guide helps you migrate from an existing Alpine-based or Debian-based +Docker Official Image (DOI) to DHI. + +If you're currently using a Debian-based Docker Official Image, migrate to the +Debian-based DHI variant. If you're using an Alpine-based image, migrate to the +Alpine-based DHI variant. This minimizes changes to package management and +dependencies during migration. + +## Key differences + +When migrating from non-hardened images to DHI, be aware of these key differences: + +| Item | Non-hardened images | Docker Hardened Images | +|:-------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Package management | Package managers generally available in all images. | Package managers generally only available in images with a `dev` tag. Runtime images don't contain package managers. Use multi-stage builds and copy necessary artifacts from the build stage to the runtime stage. | +| Non-root user | Usually runs as root by default | Runtime variants run as the nonroot user by default. Ensure that necessary files and directories are accessible to the nonroot user. | +| Multi-stage build | Optional | Recommended. 
Use images with a `dev` or `sdk` tags for build stages and non-dev images for runtime. | +| TLS certificates | May need to be installed | Contain standard TLS certificates by default. There is no need to install TLS certificates. | +| Ports | Can bind to privileged ports (below 1024) when running as root | Run as a nonroot user by default. Applications can't bind to privileged ports (below 1024) when running in Kubernetes or in Docker Engine versions older than 20.10. Configure your application to listen on port 1025 or higher inside the container. | +| Entry point | Varies by image | May have different entry points than Docker Official Images. Inspect entry points and update your Dockerfile if necessary. | +| Shell | Shell generally available in all images | Runtime images don't contain a shell. Use `dev` images in build stages to run shell commands and then copy artifacts to the runtime stage. | + +## Migration steps + +### Step 1: Update the base image in your Dockerfile + +Update the base image in your application's Dockerfile to a hardened image. This +is typically going to be an image tagged as `dev` or `sdk` because it has the tools +needed to install packages and dependencies. + +The following example diff snippet from a Dockerfile shows the old base image +replaced by the new hardened image. + +> [!NOTE] +> +> You must authenticate to `dhi.io` before you can pull Docker Hardened Images. +> Use your Docker ID credentials (the same username and password you use for +> Docker Hub). If you don't have a Docker account, [create +> one](../../accounts/create-account.md) for free. +> +> Run `docker login dhi.io` to authenticate. + + +```diff +- ## Original base image +- FROM golang:1.25 + ++ ## Updated to use hardened base image ++ FROM dhi.io/golang:1.25-debian12-dev +``` + +Note that DHI does not have a `latest` tag in order to promote best practices +around image versioning. Ensure that you specify the appropriate version tag for +your image. To find the right tag, explore the available tags in the [DHI +Catalog](https://hub.docker.com/hardened-images/catalog/). In addition, the +distribution base is specified in the tag (for example, `-alpine3.22` or +`-debian12`), so be sure to select the correct variant for your application. + +### Step 2: Update the runtime image in your Dockerfile + +> [!NOTE] +> +> Multi-stage builds are recommended to keep your final image minimal and +> secure. Single-stage builds are supported, but they include the full `dev` image +> and therefore result in a larger image with a broader attack surface. + +To ensure that your final image is as minimal as possible, you should use a +[multi-stage build](/manuals/build/building/multi-stage.md). All stages in your +Dockerfile should use a hardened image. While intermediary stages will typically +use images tagged as `dev` or `sdk`, your final runtime stage should use a runtime image. + +Utilize the build stage to compile your application and copy the resulting +artifacts to the final runtime stage. This ensures that your final image is +minimal and secure. + +The following example shows a multi-stage Dockerfile with a build stage and runtime stage: + +```dockerfile +# Build stage +FROM dhi.io/golang:1.25-debian12-dev AS builder +WORKDIR /app +COPY . . +RUN go build -o myapp + +# Runtime stage +FROM dhi.io/golang:1.25-debian12 +WORKDIR /app +COPY --from=builder /app/myapp . +ENTRYPOINT ["/app/myapp"] +``` + +After updating your Dockerfile, build and test your application. 
If you encounter +issues, see the [Troubleshoot](/manuals/dhi/troubleshoot.md) guide for common +problems and solutions. + +## Language-specific examples + +See the examples section for language-specific migration examples: + +- [Go](examples/go.md) +- [Python](examples/python.md) +- [Node.js](examples/node.md) diff --git a/content/manuals/dhi/migration/migrate-from-ubuntu.md b/content/manuals/dhi/migration/migrate-from-ubuntu.md new file mode 100644 index 00000000000..09674e8484c --- /dev/null +++ b/content/manuals/dhi/migration/migrate-from-ubuntu.md @@ -0,0 +1,140 @@ +--- +title: Migrate from Ubuntu +description: Step-by-step guide to migrate from Ubuntu-based images to Docker Hardened Images +weight: 25 +keywords: ubuntu, migration, dhi, debian, docker hardened images +--- + +Docker Hardened Images (DHI) come in [Alpine-based and Debian-based +variants](../explore/available.md). When migrating from an Ubuntu-based image, +you should migrate to the Debian-based DHI variant, as both Ubuntu and Debian +share the same package management system (APT) and underlying architecture, +making migration straightforward. + +This guide helps you migrate from an existing Ubuntu-based image to DHI. + +## Key differences + +When migrating from Ubuntu-based images to DHI Debian, be aware of these key differences: + +| Item | Ubuntu-based images | Docker Hardened Images | +|:-------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Package management | Varies by image. Some include APT package manager, others don't | Package managers generally only available in images with a `dev` tag. Runtime images don't contain package managers. Use multi-stage builds and copy necessary artifacts from the build stage to the runtime stage. | +| Non-root user | Varies by image. Some run as root, others as non-root | Runtime variants run as the non-root user by default. Ensure that necessary files and directories are accessible to the non-root user. | +| Multi-stage build | Recommended | Recommended. Use images with a `dev` or `sdk` tags for build stages and non-dev images for runtime. | +| Ports | Can bind to privileged ports (under 1024) when running as root | Run as a non-root user by default. Applications can't bind to privileged ports (under 1024) when running in Kubernetes or in Docker Engine versions older than 20.10. Configure your application to listen on port 1025 and up inside the container. | +| Entry point | Varies by image | May have different entry points than Ubuntu-based images. Inspect entry points and update your Dockerfile if necessary. | +| Shell | Varies by image. Some include a shell, others don't | Runtime images don't contain a shell. Use `dev` images in build stages to run shell commands and then copy artifacts to the runtime stage. | +| Package repositories | Uses Ubuntu package repositories | Uses Debian package repositories. Most packages have similar names, but some may differ. 
| + +## Migration steps + +### Step 1: Update the base image in your Dockerfile + +Update the base image in your application's Dockerfile to a hardened image. This +is typically going to be an image tagged as `dev` or `sdk` because it has the tools +needed to install packages and dependencies. + +The following example diff snippet from a Dockerfile shows the old Ubuntu-based image +replaced by the new DHI Debian image. + +> [!NOTE] +> +> You must authenticate to `dhi.io` before you can pull Docker Hardened Images. +> Use your Docker ID credentials (the same username and password you use for +> Docker Hub). If you don't have a Docker account, [create +> one](../../accounts/create-account.md) for free. +> +> Run `docker login dhi.io` to authenticate. + + +```diff +- ## Original Ubuntu-based image +- FROM ubuntu/go:1.22-24.04 + ++ ## Updated to use hardened Debian-based image ++ FROM dhi.io/golang:1-debian13-dev +``` + +To find the right tag, explore the available tags in the [DHI +Catalog](https://hub.docker.com/hardened-images/catalog/). + +### Step 2: Update package installation commands + +Since Ubuntu and Debian both use APT for package management, most package +installation commands remain similar. However, you need to ensure that package +installations only occur in `dev` or `sdk` images, as runtime images don't +contain package managers. + +```diff +- ## Ubuntu: Installing packages +- FROM ubuntu/go:1.22-24.04 +- RUN apt-get update && apt-get install -y \ +- git \ +- && rm -rf /var/lib/apt/lists/* + ++ ## DHI: Use a language-specific dev image with package manager ++ FROM dhi.io/golang:1-debian13-dev ++ RUN apt-get update && apt-get install -y \ ++ git \ ++ && rm -rf /var/lib/apt/lists/* +``` + +Most Ubuntu packages are available in Debian with the same names. If you +encounter missing packages, you can search for equivalent packages using the +[Debian package search](https://packages.debian.org/) website. + +### Step 3: Update the runtime image in your Dockerfile + +> [!NOTE] +> +> Multi-stage builds are recommended to keep your final image minimal and +> secure. Single-stage builds are supported, but they include the full `dev` image +> and therefore result in a larger image with a broader attack surface. + +To ensure that your final image is as minimal as possible, you should use a +[multi-stage build](/manuals/build/building/multi-stage.md). All stages in your +Dockerfile should use a hardened image. While intermediary stages will typically +use images tagged as `dev` or `sdk`, your final runtime stage should use a runtime image. + +Utilize the build stage to install dependencies and prepare your application, +then copy the resulting artifacts to the final runtime stage. This ensures that +your final image is minimal and secure. + +The following example shows a multi-stage Dockerfile migrating from Ubuntu to DHI Debian: + +```dockerfile +# Build stage +FROM dhi.io/golang:1-debian13-dev AS builder +WORKDIR /app + +# Install system dependencies (only available in dev images) +RUN apt-get update && apt-get install -y \ + git \ + && rm -rf /var/lib/apt/lists/* + +# Copy application files +COPY go.mod go.sum ./ +RUN go mod download + +COPY . . +RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags="-s -w" -o main . 
+ +# Runtime stage +FROM dhi.io/golang:1-debian13 +WORKDIR /app + +# Copy compiled binary from builder +COPY --from=builder /app/main /app/main + +# Run the application +ENTRYPOINT ["/app/main"] +``` + +## Language-specific examples + +See the examples section for language-specific migration examples: + +- [Go](examples/go.md) +- [Python](examples/python.md) +- [Node.js](examples/node.md) diff --git a/content/manuals/dhi/migration/migrate-from-wolfi.md b/content/manuals/dhi/migration/migrate-from-wolfi.md new file mode 100644 index 00000000000..5896451dba4 --- /dev/null +++ b/content/manuals/dhi/migration/migrate-from-wolfi.md @@ -0,0 +1,95 @@ +--- +title: Migrate from Wolfi +description: Step-by-step guide to migrate from Wolfi distribution images to Docker Hardened Images +weight: 30 +keywords: wolfi, chainguard, migration, dhi +--- + +This guide helps you migrate from Wolfi-based images to Docker Hardened +Images (DHI). Generally, the migration process is straightforward since Wolfi is +Alpine-like and DHI provides an Alpine-based hardened image. + +Like other hardened images, DHI provides comprehensive +[attestations](/dhi/core-concepts/attestations/) including SBOMs and provenance, +allowing you to [verify](/manuals/dhi/how-to/verify.md) image signatures and +[scan](/manuals/dhi/how-to/scan.md) for vulnerabilities to ensure the security +and integrity of your images. + +## Migration steps + +The following example demonstrates how to migrate a Dockerfile from a +Wolfi-based image to an Alpine-based Docker Hardened Image. + +### Step 1: Update the base image in your Dockerfile + +Update the base image in your application's Dockerfile to a hardened image. This +is typically going to be an image tagged as `dev` or `sdk` because it has the tools +needed to install packages and dependencies. + +The following example diff snippet from a Dockerfile shows the old base image +replaced by the new hardened image. + +> [!NOTE] +> +> You must authenticate to `dhi.io` before you can pull Docker Hardened Images. +> Use your Docker ID credentials (the same username and password you use for +> Docker Hub). If you don't have a Docker account, [create +> one](../../accounts/create-account.md) for free. +> +> Run `docker login dhi.io` to authenticate. + +```diff +- ## Original base image +- FROM cgr.dev/chainguard/go:latest-dev + ++ ## Updated to use hardened base image ++ FROM dhi.io/golang:1.25-alpine3.22-dev +``` + +Note that DHI does not have a `latest` tag in order to promote best practices +around image versioning. Ensure that you specify the appropriate version tag for your image. To find the right tag, explore the available tags in the [DHI Catalog](https://hub.docker.com/hardened-images/catalog/). + +### Step 2: Update the runtime image in your Dockerfile + +> [!NOTE] +> +> Multi-stage builds are recommended to keep your final image minimal and +> secure. Single-stage builds are supported, but they include the full `dev` image +> and therefore result in a larger image with a broader attack surface. + +To ensure that your final image is as minimal as possible, you should use a +[multi-stage build](/manuals/build/building/multi-stage.md). All stages in your +Dockerfile should use a hardened image. While intermediary stages will typically +use images tagged as `dev` or `sdk`, your final runtime stage should use a runtime image. + +Utilize the build stage to compile your application and copy the resulting +artifacts to the final runtime stage. 
This ensures that your final image is +minimal and secure. + +The following example shows a multi-stage Dockerfile with a build stage and runtime stage: + +```dockerfile +# Build stage +FROM dhi.io/golang:1.25-alpine3.22-dev AS builder +WORKDIR /app +COPY . . +RUN go build -o myapp + +# Runtime stage +FROM dhi.io/golang:1.25-alpine3.22 +WORKDIR /app +COPY --from=builder /app/myapp . +ENTRYPOINT ["/app/myapp"] +``` + +After updating your Dockerfile, build and test your application. If you encounter +issues, see the [Troubleshoot](/manuals/dhi/troubleshoot.md) guide for common +problems and solutions. + +## Language-specific examples + +See the examples section for language-specific migration examples: + +- [Go](examples/go.md) +- [Python](examples/python.md) +- [Node.js](examples/node.md) diff --git a/content/manuals/dhi/migration/migrate-with-ai.md b/content/manuals/dhi/migration/migrate-with-ai.md new file mode 100644 index 00000000000..edc075c03ea --- /dev/null +++ b/content/manuals/dhi/migration/migrate-with-ai.md @@ -0,0 +1,42 @@ +--- +title: Migrate using Gordon +linktitle: AI-assisted migration +description: Use Gordon to automatically migrate your Dockerfile to Docker Hardened Images +weight: 15 +keywords: ai assistant, migrate dockerfile, docker hardened images, automated migration +params: + sidebar: + badge: + color: violet + text: Experimental +--- + +{{< summary-bar feature_name="Gordon DHI migration" >}} + +You can use Gordon to automatically migrate your +Dockerfile to use Docker Hardened Images (DHI). + +1. Ensure Gordon is [enabled](/manuals/ai/gordon.md#enable-ask-gordon). +2. In the terminal, navigate to the directory containing your Dockerfile. +3. Start a conversation with the assistant: + ```bash + docker ai + ``` +4. Type: + ```console + "Migrate my dockerfile to DHI" + ``` +5. Follow the conversation with the assistant. The assistant will edit your Dockerfile, so when + it requests access to the filesystem and more, type `yes` to allow the assistant to proceed. + +When the migration is complete, you see a success message: + +```text +The migration to Docker Hardened Images (DHI) is complete. The updated Dockerfile +successfully builds the image, and no vulnerabilities were detected in the final image. +The functionality and optimizations of the original Dockerfile have been preserved. +``` + +> [!IMPORTANT] +> +> As with any AI tool, you must verify the assistant's edits and test your image. diff --git a/content/manuals/dhi/resources.md b/content/manuals/dhi/resources.md new file mode 100644 index 00000000000..dcc6d76126f --- /dev/null +++ b/content/manuals/dhi/resources.md @@ -0,0 +1,90 @@ +--- +title: Docker Hardened Images resources +linktitle: Additional resources +description: Additional resources including product information, blog posts, and GitHub repositories for Docker Hardened Images +weight: 999 +--- + +This page provides links to additional resources related to Docker Hardened +Images (DHI), including blog posts, guides, Docker Hub resources, and GitHub +repositories. + +For product information and feature comparison, visit the [Docker Hardened +Images product page](https://www.docker.com/products/hardened-images/). 
+ +## Blog posts + +The following blog posts provide insights into Docker Hardened Images, security +features, and announcements: + +| Date published | Title | +|------|-------| +| April 14, 2026 | [Why We Chose the Harder Path: Docker Hardened Images, One Year Later](https://www.docker.com/blog/why-we-chose-the-harder-path-docker-hardened-images-one-year-later/) | +| April 8, 2026 | [Reclaim Developer Hours through Smarter Vulnerability Prioritization with Docker and Mend.io](https://www.docker.com/blog/reclaim-developer-hours-through-smarter-vulnerability-prioritization-with-docker-and-mend-io/) | +| March 3, 2026 | [Announcing Docker Hardened System Packages](https://www.docker.com/blog/announcing-docker-hardened-system-packages/) | +| January 25, 2026 | [Making the Most of Your Docker Hardened Images Enterprise Trial - Part 3](https://www.docker.com/blog/making-the-most-of-your-docker-hardened-images-enterprise-trial-part-3/) | +| January 24, 2026 | [Making the Most of Your Docker Hardened Images Enterprise Trial - Part 2](https://www.docker.com/blog/making-the-most-of-your-docker-hardened-images-enterprise-trial-part-2/) | +| December 19, 2025 | [Docker Hardened Images: Security Independently Validated by SRLabs](https://www.docker.com/blog/docker-hardened-images-security-independently-validated-by-srlabs/) | +| December 17, 2025 | [A Safer Container Ecosystem with Docker: Free Docker Hardened Images](https://www.docker.com/blog/docker-hardened-images-for-every-developer/) | +| November 14, 2025 | [Making the Most of Your Docker Hardened Images Enterprise Trial - Part 1](https://www.docker.com/blog/making-the-most-of-your-docker-hardened-images-trial-part-1/) | +| October 15, 2025 | [Docker Hardened Images: Crafted by Humans, Protected by AI](https://www.docker.com/blog/docker-hardened-images-crafted-by-humans-protected-by-ai/) | +| September 29, 2025 | [Expanding Docker Hardened Images: Secure Helm Charts for Deployments](https://www.docker.com/blog/docker-hardened-images-helm-charts-beta/) | +| August 6, 2025 | [The Next Evolution of Docker Hardened Images: Customizable, FedRAMP Ready, AI Migration Agent, and Deeper Integrations](https://www.docker.com/blog/the-next-evolution-of-docker-hardened-images/) | +| August 6, 2025 | [Accelerating FedRAMP Compliance with Docker Hardened Images](https://www.docker.com/blog/fedramp-compliance-with-hardened-images/) | +| May 19, 2025 | [Introducing Docker Hardened Images: Secure, Minimal, and Ready for Production](https://www.docker.com/blog/introducing-docker-hardened-images/) | + +## Guides + +For guides that demonstrate how to use Docker Hardened Images in various +scenarios, see the [guides section filtered by DHI](/guides/?tags=dhi). 
+ +## Docker Hub + +Docker Hardened Images are available on Docker Hub: + +- [Docker Hardened Images Catalog](https://dhi.io): Browse and pull Docker + Hardened Images from the official catalog +- [Docker Hub MCP Server](https://hub.docker.com/mcp/server/dockerhub/overview): + MCP server to list Docker Hardened Images (DHIs) available in your + organizations + +## GitHub repositories and resources + +Docker Hardened Images repositories are available in the +[docker-hardened-images](https://github.com/docker-hardened-images) GitHub +organization: + +- [Catalog](https://github.com/docker-hardened-images/catalog): DHI definition + files and catalog metadata +- [Advisories](https://github.com/docker-hardened-images/advisories): CVE + advisories for OSS packages distributed with DHIs + - [Scanner vendor integration guide](https://github.com/docker-hardened-images/advisories/tree/main/integration): + Reference for scanner vendors integrating DHI VEX support +- [Keyring](https://github.com/docker-hardened-images/keyring): Public signing + keys and verification tools +- [Log](https://github.com/docker-hardened-images/log): Log of references (tag > + digest) for Docker Hardened Images +- [dhictl](https://github.com/docker-hardened-images/dhictl): Command-line + interface for managing and interacting with Docker Hardened Images +- [Terraform Provider](https://github.com/docker-hardened-images/terraform-provider-dhi): + Terraform provider for managing DHI resources + ([Terraform Registry](https://registry.terraform.io/providers/docker-hardened-images/dhi/latest/docs)) +- [Discussions](https://github.com/orgs/docker-hardened-images/discussions): + Community forum and product discussions + +## Additional resources + +- [Start a free trial](https://hub.docker.com/hardened-images/start-free-trial): + Explore DHI Select and Enterprise features including FIPS/STIG variants, customization, + and SLA-backed support +- [Support Service Level Agreement](https://docs.docker.com/go/dhi-sla/): + Review the SLA commitments for DHI Select and Enterprise subscriptions +- [Request a demo](https://www.docker.com/products/hardened-images/#getstarted): Get a + personalized demo and information about DHI Select and Enterprise subscriptions +- [Request an image](https://github.com/docker-hardened-images/catalog/issues): + Submit a request for a specific Docker Hardened Image +- [Contact Sales](https://www.docker.com/pricing/contact-sales/): Connect with + Docker sales team for enterprise inquiries +- [Docker Support](https://www.docker.com/support/): Access support resources + for DHI Select and Enterprise customers + diff --git a/content/manuals/dhi/troubleshoot.md b/content/manuals/dhi/troubleshoot.md new file mode 100644 index 00000000000..f3d86b07c82 --- /dev/null +++ b/content/manuals/dhi/troubleshoot.md @@ -0,0 +1,186 @@ +--- +title: Troubleshoot +description: Resolve common issues when building, running, or debugging Docker Hardened Images, such as non-root behavior, missing shells, and port access. +weight: 40 +tags: [Troubleshooting] +keywords: troubleshoot hardened image, docker debug container, non-root permission issue, missing shell error, no package manager, debug, hardened images, DHI, troubleshooting, ephemeral container, docker debug, non-root containers, hardened container image, debug secure container +aliases: +- /dhi/how-to/debug/ +--- + +This page covers debugging techniques and common issues you may encounter while +migrating to or using Docker Hardened Images (DHIs). 
+ +## General debugging + +Docker Hardened Images prioritize minimalism and security, which means +they intentionally leave out many common debugging tools (like shells or package +managers). This makes direct troubleshooting difficult without introducing risk. +To address this, you can use [Docker +Debug](/reference/cli/docker/debug/), a secure workflow that +temporarily attaches an ephemeral debug container to a running service or image +without modifying the original image. + +This section shows how to debug Docker Hardened Images locally during development. +With Docker Debug, you can also debug containers remotely using the `--host` +option. + +### Use Docker Debug + +#### Step 1: Run a container from a Hardened Image + +Start with a DHI-based container that simulates an issue: + +```console +$ docker run -d --name myapp dhi.io/python:3.13 python -c "import time; time.sleep(300)" +``` + +This container doesn't include a shell or tools like `ps`, `top`, or `cat`. + +If you try: + +```console +$ docker exec -it myapp sh +``` + +You'll see: + +```console +exec: "sh": executable file not found in $PATH +``` + +#### Step 2: Use Docker Debug to inspect the container + +Use the `docker debug` command to attach a temporary, tool-rich debug container to the running instance. + +```console +$ docker debug myapp +``` + +From here, you can inspect running processes, network status, or mounted files. + +For example, to check running processes: + +```console +$ ps aux +``` + +Type `exit` to leave the container when done. + +### Alternative debugging approaches + +In addition to using Docker Debug, you can also use the following approaches for +debugging DHI containers. + +#### Use the -dev variant + +Docker Hardened Images offer a `-dev` variant that includes a shell +and a package manager to install debugging tools. Simply replace the image tag +with `-dev`: + +```console +$ docker run -it --rm dhi.io/python:3.13-dev sh +``` + +Type `exit` to leave the container when done. Note that using the `-dev` variant +increases the attack surface and it is not recommended as a runtime for +production environments. + +#### Mount debugging tools with image mounts + +You can use the image mount feature to mount debugging tools into your container +without modifying the base image. + +##### Step 1: Run a container from a hardened image + +Start with a DHI-based container that simulates an issue: + +```console +$ docker run -d --name myapp dhi.io/python:3.13 python -c "import time; time.sleep(300)" +``` + +##### Step 2: Mount debugging tools into the container + +Run a new container that mounts a tool-rich image (like `busybox`) into +the running container's namespace: + +```console +$ docker run --rm -it --pid container:myapp \ + --mount type=image,source=busybox,destination=/dbg,ro \ + dhi.io/python:3.13 /dbg/bin/sh +``` + +This mounts the BusyBox image at `/dbg`, giving you access to its tools while +keeping your original container image unchanged. Since the hardened Python image +doesn't include standard utilities, you need to use the full path to the mounted +tools: + +```console +$ /dbg/bin/ls / +$ /dbg/bin/ps aux +$ /dbg/bin/cat /etc/os-release +``` + +Type `exit` to leave the container when done. + +## Common issues + +The following are specific issues you may encounter when working with Docker +Hardened Images, along with recommended solutions. + +### Permissions + +DHIs run as a nonroot user by default for enhanced security. This can result in +permission issues when accessing files or directories. 
Ensure your application +files and runtime directories are owned by the expected UID/GID or have +appropriate permissions. + +To find out which user a DHI runs as, check the repository page for the image on +Docker Hub. See [View image variant +details](./how-to/explore.md#image-variant-details) for more information. + +### Privileged ports + +Nonroot containers cannot bind to ports below 1024 by default. This is enforced +by both the container runtime and the kernel (especially in Kubernetes and +Docker Engine < 20.10). + +Inside the container, configure your application to listen on an unprivileged +port (1025 or higher). For example `docker run -p 80:8080 my-image` maps +port 8080 in the container to port 80 on the host, allowing you to access it +without needing root privileges. + +### No shell + +Runtime DHIs omit interactive shells like `sh` or `bash`. If your build or +tooling assumes a shell is present (e.g., for `RUN` instructions), use a `dev` +variant of the image in an earlier build stage and copy the final artifact into +the runtime image. + +To find out which shell, if any, a DHI has, check the repository page for the +image on Docker Hub. See [View image variant +details](./how-to/explore.md#image-variant-details) for more information. + +Also, use Docker Debug when you need shell access to a running container. For +more details, see [General debugging](#general-debugging). + +### Entry point differences + +DHIs may define different entry points compared to Docker Official Images (DOIs) +or other community images. + +To find out the ENTRYPOINT or CMD for a DHI, check the repository page for the +image on Docker Hub. See [View image variant +details](./how-to/explore.md#image-variant-details) for more information. + +### No package manager + +Runtime Docker Hardened Images are stripped down for security and minimal attack +surface. As a result, they don't include a package manager such as `apk` or +`apt`. This means you can't install additional software directly in the runtime +image. + +If your build or application setup requires installing packages (for example, to +compile code, install runtime dependencies, or add diagnostic tools), use a `dev` +variant of the image in a build stage. Then, copy only the necessary artifacts +into the final runtime image. \ No newline at end of file diff --git a/content/manuals/docker-hub/_index.md b/content/manuals/docker-hub/_index.md index 71f8c076729..07c4ccfc781 100644 --- a/content/manuals/docker-hub/_index.md +++ b/content/manuals/docker-hub/_index.md @@ -5,7 +5,7 @@ title: Docker Hub weight: 30 params: sidebar: - group: Products + group: Supply chain security grid: - title: Quickstart description: Step-by-step instructions on getting started on Docker Hub. @@ -20,6 +20,10 @@ grid: or the Docker community. icon: inbox link: /docker-hub/repos +- title: Settings + description: Learn about settings in Docker Hub. + icon: settings + link: /docker-hub/settings - title: Organizations description: Learn about organization administration. icon: store diff --git a/content/manuals/docker-hub/image-library/_index.md b/content/manuals/docker-hub/image-library/_index.md index 3d41410ff1f..ff2847b0e7b 100644 --- a/content/manuals/docker-hub/image-library/_index.md +++ b/content/manuals/docker-hub/image-library/_index.md @@ -14,9 +14,9 @@ workflows, making it easier to share and collaborate. In this section, learn about: - [Search](./search.md): Discover how to browse and search Docker Hub's extensive resources. 
-- [Trusted content](./trusted-content.md): Dive into Docker Official Images, - Verified Publisher content, and Sponsored Open Source images, all vetted for - security and reliability to streamline your workflows. -- [Catalogs](./catalogs.md): Explore specialized collections like the generative AI catalog. +- [Trusted content](./trusted-content.md): Dive into Docker Hardened Images, + Docker Official Images, Verified Publisher content, and Sponsored Open Source + images, all vetted for security and reliability to streamline your workflows. +- [Catalogs](./catalogs.md): Explore specialized collections like the generative AI catalogs. - [Mirroring](./mirror.md): Learn how to create a mirror of Docker Hub's container image library as a pull-through cache. \ No newline at end of file diff --git a/content/manuals/docker-hub/image-library/catalogs.md b/content/manuals/docker-hub/image-library/catalogs.md index 713df0628b2..104543ffe45 100644 --- a/content/manuals/docker-hub/image-library/catalogs.md +++ b/content/manuals/docker-hub/image-library/catalogs.md @@ -1,5 +1,5 @@ --- -description: Explore specialized Docker Hub collections like the Generative AI catalog. +description: Explore specialized Docker Hub collections like the generative AI catalogs. keywords: Docker Hub, Hub, catalog title: Docker Hub catalogs linkTitle: Catalogs @@ -19,48 +19,42 @@ Docker Hub: - Accelerate development: Quickly integrate advanced capabilities into your applications without the hassle of extensive research or setup. -The generative AI catalog is the first catalog in Docker Hub, offering -specialized content for AI development. +The following sections provide an overview of the key catalogs available in Docker Hub. -## Generative AI catalog +## MCP Catalog -The [generative AI catalog](https://hub.docker.com/catalogs/gen-ai) makes it -easy to explore and add AI capabilities to your applications. With trusted, -ready-to-use content and comprehensive documentation, you can skip the hassle of -sorting through countless tools and configurations. Instead, focus your time and -energy on creating innovative AI-powered applications. +The [MCP Catalog](https://hub.docker.com/mcp/) is a centralized, trusted +registry for discovering, sharing, and running Model Context Protocol +(MCP)-compatible tools. Seamlessly integrated into Docker Hub, the catalog +includes: -The generative AI catalog provides a wide range of trusted content, organized -into key areas to support diverse AI development needs: +- Over 100 verified MCP servers packaged as Docker images +- Tools from partners such as New Relic, Stripe, and Grafana +- Versioned releases with publisher verification +- Simplified pull-and-run support through Docker Desktop and Docker CLI -- Demos: Ready-to-deploy examples showcasing generative AI capabilities. These - demos provide a hands-on way to explore AI tools and frameworks, making it - easier to understand how they can be integrated into real-world applications. -- Model Context Protocol (MCP) servers: MCP servers provide reusable toolsets - that can be used across clients, like Claude Desktop. -- Models: Pre-trained AI models for tasks like text generation, - Natural Language Processing (NLP), and conversational AI. These models - provide a foundation for - AI applications without requiring developers to train models from scratch. 
-- Applications and end-to-end platforms: Comprehensive platforms and tools that - simplify AI application development, including low-code solutions and - frameworks for building multi-agent and Retrieval-Augmented Generation (RAG) - applications. -- Model deployment and serving: Tools and frameworks that enable developers to - efficiently deploy and serve AI models in production environments. These - resources include pre-configured stacks for GPUs and other specialized - hardware, ensuring performance at scale. -- Orchestration: Solutions for managing complex AI workflows, such as workflow - engines, Large Language Model (LLM) application frameworks, and lifecycle management - tools, to help streamline development and operations. -- Machine learning frameworks: Popular frameworks like TensorFlow and PyTorch - that provide the building blocks for creating, training, and fine-tuning - machine learning models. -- Databases: Databases optimized for AI workloads, including vector databases - for similarity search, time-series databases for analytics, and NoSQL - solutions for handling unstructured data. +Each server runs in an isolated container to ensure consistent behavior and +minimize configuration headaches. For developers working with Claude Desktop or +other MCP clients, the catalog provides an easy way to extend functionality with +drop-in tools. -> [!NOTE] -> -> For publishers, [contact us](https://www.docker.com/partners/programs/) to -> join the generative AI catalog. \ No newline at end of file +To learn more about MCP servers, see [MCP Catalog and Toolkit](../../ai/mcp-catalog-and-toolkit/_index.md). + +## AI Models Catalog + +The [AI Models Catalog](https://hub.docker.com/catalogs/models/) provides +curated, trusted models that work with [Docker Model +Runner](../../ai/model-runner/_index.md). This catalog is designed to make AI +development more accessible by offering pre-packaged, ready-to-use models that +you can pull, run, and interact with using familiar Docker tools. + +With the AI Models Catalog and Docker Model Runner, you can: + +- Pull and serve models from Docker Hub or any OCI-compliant registry +- Interact with models via OpenAI-compatible APIs +- Run and test models locally using Docker Desktop or CLI +- Package and publish models using the `docker model` CLI + +Whether you're building generative AI applications, integrating LLMs into your +workflows, or experimenting with machine learning tools, the AI Models Catalog +simplifies the model management experience. diff --git a/content/manuals/docker-hub/image-library/search.md b/content/manuals/docker-hub/image-library/search.md index a78f57a39c6..8a3152918f0 100644 --- a/content/manuals/docker-hub/image-library/search.md +++ b/content/manuals/docker-hub/image-library/search.md @@ -14,15 +14,21 @@ types of content. ## Filters The search functionality includes filters to narrow down -results based on your requirements, such as products, categories, and trusted -content. This ensures that you can quickly find and access the resources best +results based on your requirements, such as products, categories, trusted +content, and publishers. This ensures that you can quickly find and access the resources best suited to your project. ### Products -Docker Hub's content library features three products, each designed to meet -specific needs of developers and organizations. These products include images, -plugins, and extensions. 
+Docker Hub's content library features various products, each designed to meet +specific needs of developers and organizations. These products include: + +- Images +- Extensions +- Helm charts +- Compose files +- AI models +- Docker Engine plugins #### Images @@ -42,26 +48,6 @@ reusable building blocks, reducing the need to start from scratch. Whether you're a beginner building your first container or an enterprise managing complex architectures, Docker Hub images provide a reliable foundation. -#### Plugins - -Plugins in Docker Hub let you extend and customize Docker Engine to suit -specialized requirements. Plugins integrate directly with the Docker Engine and -provide capabilities such as: - -- Network plugins: Enhance networking functionality, enabling integration with - complex network infrastructures. -- Volume plugins: Provide advanced storage options, supporting persistent and - distributed storage across various backends. -- Authorization plugins: Offer fine-grained access control to secure Docker - environments. - -By leveraging Docker plugins, teams can tailor Docker Engine to meet their -specific operational needs, ensuring compatibility with existing infrastructures -and workflows. - -To learn more about plugins, see [Docker Engine managed plugin -system](/manuals/engine/extend/_index.md). - #### Extensions Docker Hub offers extensions for Docker Desktop, which enhance its core @@ -85,6 +71,88 @@ Desktop's interface. To learn more about extensions, see [Docker Extensions](/manuals/extensions/_index.md). +#### Helm charts + +Helm charts in Docker Hub provide a streamlined way to package, configure, and +deploy Kubernetes applications. Helm is the package manager for Kubernetes, and +charts are pre-configured templates that define the resources needed to run an +application in a Kubernetes cluster. Docker Hub hosts a variety of Helm charts +that provide: + +- Application packaging: Bundle Kubernetes manifests, configurations, and + dependencies into a single, reusable chart. +- Version management: Track and manage different versions of your application + deployments. +- Configuration templating: Customize deployments with values files, making it + easy to deploy the same application across different environments. +- Dependency management: Automatically handle chart dependencies, ensuring all + required components are deployed together. + +Helm charts reduce the complexity of Kubernetes deployments, making it easier +for teams to deploy, upgrade, and manage applications in production +environments. + +#### Compose + +Docker Compose files in Docker Hub enable multi-container application +orchestration through simple YAML configuration files. Compose is a tool for +defining and running multi-container Docker applications, and Docker Hub hosts +Compose files that help you: + +- Multi-container orchestration: Define and run applications consisting of + multiple interconnected containers with a single command. +- Service configuration: Specify container images, environment variables, + networks, volumes, and dependencies in a declarative format. +- Development and testing: Quickly spin up complete application stacks for local + development, testing, or demonstration purposes. +- Environment consistency: Ensure consistent application behavior across + development, staging, and production environments. + +Compose files simplify the process of managing complex applications by +providing a clear, version-controlled definition of your entire application +stack. 
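+
+For example, once a project includes a Compose file, you can typically bring the
+entire stack up and tear it down again with single commands:
+
+```console
+$ docker compose up -d
+$ docker compose down
+```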
+ +#### AI models + +Docker Hub hosts AI and machine learning models in containerized formats, +making it easier to deploy, share, and run AI applications across different +environments. These containerized AI models provide: + +- Pre-trained models: Access ready-to-use machine learning models for common + tasks such as image recognition, natural language processing, and predictive + analytics. +- Model serving: Deploy models as containerized services that can be easily + integrated into applications and scaled as needed. +- Reproducible environments: Package models with their dependencies, ensuring + consistent behavior across development and production environments. +- Framework support: Find models built with popular frameworks like TensorFlow, + PyTorch, scikit-learn, and others. + +Containerized AI models remove the need to manage infrastructure dependencies, +let you deploy models across different environments, and enable scaling as +application demands change. + +#### Plugins + +Plugins in Docker Hub let you extend and customize Docker Engine to suit +specialized requirements. Plugins integrate directly with the Docker Engine and +provide capabilities such as: + +- Network plugins: Enhance networking functionality, enabling integration with + complex network infrastructures. +- Volume plugins: Provide advanced storage options, supporting persistent and + distributed storage across various backends. +- Authorization plugins: Offer fine-grained access control to secure Docker + environments. + +By leveraging Docker plugins, teams can tailor Docker Engine to meet their +specific operational needs, ensuring compatibility with existing infrastructures +and workflows. + +To learn more about plugins, see [Docker Engine managed plugin +system](/manuals/engine/extend/_index.md). + + ### Trusted content Docker Hub's trusted content provides a curated selection of high-quality, @@ -92,11 +160,19 @@ secure images designed to give developers confidence in the reliability and security of the resources they use. These images are stable, regularly updated, and adhere to industry best practices, making them a strong foundation for building and deploying applications. Docker Hub's trusted content includes, -Docker Official Images, Verified Publisher images, and Docker-Sponsored Open -Source Software images. +Docker Hardened Images, Docker Official Images, Verified Publisher images, and +Docker-Sponsored Open Source Software images. For more details, see [Trusted content](./trusted-content.md). +### Publishers + +The **Publishers** filter lets you narrow image results by the organization +that published the image. + +Publishers of trusted content appear first. Only a select number of publishers +are shown. + ### Categories Docker Hub makes it easy to find and explore container images with categories. @@ -164,4 +240,4 @@ extension has been reviewed by Docker for quality and reliability. > [!NOTE] > > The **Reviewed by Docker** filter is only available for extensions. To make -> the filter available, you must select only the **Extensions** filter in **Products**. \ No newline at end of file +> the filter available, you must select only the **Extensions** filter in **Products**. 
diff --git a/content/manuals/docker-hub/image-library/trusted-content.md b/content/manuals/docker-hub/image-library/trusted-content.md index 518ccfce6db..efa4ac23574 100644 --- a/content/manuals/docker-hub/image-library/trusted-content.md +++ b/content/manuals/docker-hub/image-library/trusted-content.md @@ -12,9 +12,9 @@ Docker Hub's trusted content provides a curated selection of high-quality, secure images designed to give developers confidence in the reliability and security of the resources they use. These images are stable, regularly updated, and adhere to industry best practices, making them a strong foundation for -building and deploying applications. Docker Hub's trusted content includes, -Docker Official Images, Verified Publisher images, and Docker-Sponsored Open -Source Software images. +building and deploying applications. Docker Hub's trusted content includes +Docker Official Images, Docker Hardened Images and charts, Verified Publisher +images, and Docker-Sponsored Open Source Software images. ## Docker Official Images @@ -137,7 +137,7 @@ Docker Hub for examples on how to install packages if you are unfamiliar. ### Codenames Tags with words that look like Toy Story characters (for example, `bookworm`, -`bullseye`, and `trixie`) or adjectives (such as `focal`, `jammy`, and +`bullseye`, and `trixie`) or adjectives (such as `jammy`, and `noble`), indicate the codename of the Linux distribution they use as a base image. Debian release codenames are [based on Toy Story characters](https://en.wikipedia.org/wiki/Debian_version_history#Naming_convention), and Ubuntu's take the form of "Adjective Animal". For example, the @@ -156,6 +156,38 @@ documentation. Reading through the "How to use this image" and "Image Variants" sections will help you to understand how to use these variants. +### Troubleshooting failed pulls + +If you're experiencing failed pulls of Docker Official Images, check whether +the `DOCKER_CONTENT_TRUST` environment variable is set to `1`. Starting in +August 2025, Docker Content Trust signing certificates for Docker Official +Images began expiring. To resolve pull failures, unset the `DOCKER_CONTENT_TRUST` +environment variable. For more details, see the +[DCT retirement blog post](https://www.docker.com/blog/retiring-docker-content-trust/). + +## Docker Hardened Images + +Docker Hardened Images (DHI) are minimal, secure, and production-ready +container base and application images maintained by Docker. DHI also includes +Docker-provided hardened Helm charts built from upstream sources and published +as OCI artifacts in Docker Hub. + +DHI is designed to reduce vulnerabilities and simplify compliance while fitting +into existing Docker workflows with little to no retooling required. Docker +maintains near-zero CVEs in DHI images, and DHI images and charts include +signed security metadata such as SBOMs and provenance attestations. + +Image and chart repositories have special badges +on Docker Hub, making it easier to identify trusted DHI content. + +![Docker Hardened Image badge](../images/dhi-image-label.png) + +![Docker Hardened Chart badge](../images/dhi-chart-label.png) + +To browse available repositories, see the [Docker Hardened Images +catalog](https://hub.docker.com/hardened-images/catalog). For implementation +guidance, see [Docker Hardened Images](/dhi/). 
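+
+For example, after signing in to the DHI registry, you can pull a hardened image
+the same way you pull any other image. The repository and tag shown here are
+illustrative:
+
+```console
+$ docker login dhi.io
+$ docker pull dhi.io/python:3.13
+```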
+ ## Verified Publisher images The Docker Verified Publisher program provides high-quality images from @@ -169,7 +201,7 @@ Images that are part of this program have a special badge on Docker Hub making it easier for users to identify projects that Docker has verified as high-quality commercial publishers. -![Docker-Sponsored Open Source badge](../images/verified-publisher-badge-iso.png) +![Docker-Verified Publisher badge](../images/verified-publisher-badge-iso.png) ## Docker-Sponsored Open Source Software images @@ -180,4 +212,4 @@ Images that are part of this program have a special badge on Docker Hub making it easier for users to identify projects that Docker has verified as trusted, secure, and active open-source projects. -![Docker-Sponsored Open Source badge](../images/sponsored-badge-iso.png) \ No newline at end of file +![Docker-Sponsored Open Source badge](../images/sponsored-badge-iso.png) diff --git a/content/manuals/docker-hub/images/azure-create-connection.png b/content/manuals/docker-hub/images/azure-create-connection.png deleted file mode 100644 index 207a4d629c5..00000000000 Binary files a/content/manuals/docker-hub/images/azure-create-connection.png and /dev/null differ diff --git a/content/manuals/docker-hub/images/create-connection.png b/content/manuals/docker-hub/images/create-connection.png deleted file mode 100644 index 30561e1ade2..00000000000 Binary files a/content/manuals/docker-hub/images/create-connection.png and /dev/null differ diff --git a/content/manuals/docker-hub/images/dhi-chart-label.png b/content/manuals/docker-hub/images/dhi-chart-label.png new file mode 100644 index 00000000000..abe3eb6b5f5 Binary files /dev/null and b/content/manuals/docker-hub/images/dhi-chart-label.png differ diff --git a/content/manuals/docker-hub/images/dhi-image-label.png b/content/manuals/docker-hub/images/dhi-image-label.png new file mode 100644 index 00000000000..b1bf81f0d0c Binary files /dev/null and b/content/manuals/docker-hub/images/dhi-image-label.png differ diff --git a/content/manuals/docker-hub/images/official-image-badge-iso.png b/content/manuals/docker-hub/images/official-image-badge-iso.png index 7f95fe18533..e700f1b6a4e 100644 Binary files a/content/manuals/docker-hub/images/official-image-badge-iso.png and b/content/manuals/docker-hub/images/official-image-badge-iso.png differ diff --git a/content/manuals/docker-hub/images/organization-tabs.png b/content/manuals/docker-hub/images/organization-tabs.png deleted file mode 100644 index fa607aa1928..00000000000 Binary files a/content/manuals/docker-hub/images/organization-tabs.png and /dev/null differ diff --git a/content/manuals/docker-hub/images/saml-create-connection.png b/content/manuals/docker-hub/images/saml-create-connection.png deleted file mode 100644 index 3a1e8dec5cb..00000000000 Binary files a/content/manuals/docker-hub/images/saml-create-connection.png and /dev/null differ diff --git a/content/manuals/docker-hub/images/sponsored-badge-iso.png b/content/manuals/docker-hub/images/sponsored-badge-iso.png index 3281f86a519..926ac801dad 100644 Binary files a/content/manuals/docker-hub/images/sponsored-badge-iso.png and b/content/manuals/docker-hub/images/sponsored-badge-iso.png differ diff --git a/content/manuals/docker-hub/images/verified-publisher-badge-iso.png b/content/manuals/docker-hub/images/verified-publisher-badge-iso.png index 63721f3148d..1e6620814bf 100644 Binary files a/content/manuals/docker-hub/images/verified-publisher-badge-iso.png and 
b/content/manuals/docker-hub/images/verified-publisher-badge-iso.png differ diff --git a/content/manuals/docker-hub/images/verified-publisher-badge.png b/content/manuals/docker-hub/images/verified-publisher-badge.png index 27d99b87fcf..2b7c19e02d6 100644 Binary files a/content/manuals/docker-hub/images/verified-publisher-badge.png and b/content/manuals/docker-hub/images/verified-publisher-badge.png differ diff --git a/content/manuals/docker-hub/quickstart.md b/content/manuals/docker-hub/quickstart.md index 6d1f1d29fd0..ab66eaae14c 100644 --- a/content/manuals/docker-hub/quickstart.md +++ b/content/manuals/docker-hub/quickstart.md @@ -17,7 +17,7 @@ through creating a custom image and sharing it through Docker Hub. ## Prerequisites - [Download and install Docker](../../get-started/get-docker.md) -- [Create a Docker account](https://app.docker.com/signup) +- A verified [Docker](https://app.docker.com/signup) account ## Step 1: Find an image in Docker Hub's library @@ -115,7 +115,7 @@ You can run images from Docker Hub using the CLI or Docker Desktop Dashboard. The container logs appear after the container starts. 5. Select the **8080:80** link to open the server, or visit - [https://localhost:8080](https://localhost:8080) in your web browser. + [http://localhost:8080](http://localhost:8080) in your web browser. 6. In the Docker Desktop Dashboard, select the **Stop** button to stop the container. @@ -140,7 +140,7 @@ You can run images from Docker Hub using the CLI or Docker Desktop Dashboard. The `docker run` command automatically pulls and runs the image without the need to run `docker pull` first. To learn more about the command and its options, see the [`docker run` CLI - reference](../../reference/cli/docker/container/run.md). After running the + reference](/reference/cli/docker/container/run/). After running the command, you should see output similar to the following. ```console {collapse=true} @@ -174,7 +174,7 @@ You can run images from Docker Hub using the CLI or Docker Desktop Dashboard. ... ``` -3. Visit [https://localhost:8080](https://localhost:8080) to view the default +3. Visit [http://localhost:8080](http://localhost:8080) to view the default Nginx page and verify that the container is running. 4. In the terminal, press Ctrl+C to stop the container. @@ -216,7 +216,7 @@ customize your own images to suit specific needs. This command builds your image and tags it so that Docker understands which repository to push it to in Docker Hub. To learn more about the command and its options, see the [`docker build` CLI - reference](../../reference/cli/docker/buildx/build.md). After running the + reference](/reference/cli/docker/buildx/build/). After running the command, you should see output similar to the following. ```console {collapse=true} @@ -241,7 +241,7 @@ customize your own images to suit specific needs. $ docker run -p 8080:80 --rm /nginx-custom ``` -4. Visit [https://localhost:8080](https://localhost:8080) to view the page. You +4. Visit [http://localhost:8080](http://localhost:8080) to view the page. You should see `Hello world from Docker!`. 5. In the terminal, press CTRL+C to stop the container. @@ -262,7 +262,7 @@ customize your own images to suit specific needs. The command pushes the image to Docker Hub and automatically creates the repository if it doesn't exist. To learn more about the command, see the [`docker push` CLI - reference](../../reference/cli/docker/image/push.md). After running the + reference](/reference/cli/docker/image/push/). 
After running the command, you should see output similar to the following. ```console {collapse=true} @@ -323,4 +323,3 @@ these options. Add [repository information](./repos/manage/information.md) to help users find and use your image. - diff --git a/content/manuals/docker-hub/release-notes.md b/content/manuals/docker-hub/release-notes.md index 0f8e967863d..803cc20c5f6 100644 --- a/content/manuals/docker-hub/release-notes.md +++ b/content/manuals/docker-hub/release-notes.md @@ -2,7 +2,8 @@ title: Docker Hub release notes linkTitle: Release notes weight: 999 -description: Learn about the new features, bug fixes, and breaking changes for Docker +description: + Learn about the new features, bug fixes, and breaking changes for Docker Hub keywords: docker hub, whats new, release notes toc_min: 1 @@ -13,6 +14,23 @@ tags: [Release notes] Here you can learn about the latest changes, new features, bug fixes, and known issues for each Docker Hub release. +## 2026-05-06 + +### Deprecation notice + +- [Docker Hub Automated Builds](./repos/manage/builds/) + is being deprecated. Existing accounts will have access until April 1, 2027. + See [migration options](./repos/manage/builds/migrate/) + for guides on migrating to GitHub Actions or Bitbucket Pipelines. + +## 2026-02-13 + +### New + +- Administrators can now prevent creating public repositories within + organization namespaces using the [Disable public + repositories](./settings.md#disable-creation-of-public-repos) setting. + ## 2025-02-18 ### New @@ -29,7 +47,7 @@ known issues for each Docker Hub release. ### New - - You can tag Docker Hub repositories with [categories](./repos/manage/information.md#repository-categories). +- You can tag Docker Hub repositories with [categories](./repos/manage/information.md#repository-categories). ## 2023-12-11 @@ -47,13 +65,13 @@ known issues for each Docker Hub release. ## 2023-08-28 -- Organizations with SSO enabled can assign members to roles, organizations, and teams with [SCIM role mapping](scim.md#set-up-role-mapping). +- Organizations with SSO enabled can assign members to roles, organizations, and teams with [SCIM role mapping](scim.md#set-up-role-mapping). ## 2023-07-26 ### New -- Organizations can assign the [editor role](roles-and-permissions.md) to members to grant additional permissions without full administrative access. +- Organizations can assign the [editor role](/manuals/enterprise/security/roles-and-permissions/_index.md) to members to grant additional permissions without full administrative access. ## 2023-05-09 @@ -65,7 +83,7 @@ known issues for each Docker Hub release. ### New -- You can now automatically sync user updates with your Docker organizations and teams with [Group Mapping](group-mapping.md) for SSO and SCIM provisioning. +- You can now automatically sync user updates with your Docker organizations and teams with [Group Mapping](group-mapping.md) for SSO and SCIM provisioning. ## 2022-12-12 @@ -73,7 +91,6 @@ known issues for each Docker Hub release. - The new domain audit feature lets you audit your domains for users who aren't a member of your organization. - ## 2022-09-26 ### New @@ -84,13 +101,13 @@ known issues for each Docker Hub release. ### Bug fixes and enhancements -- In Docker Hub, you can now download a [registry.json](../security/for-admins/enforce-sign-in/_index.md) file or copy the commands to create a registry.json file to enforce sign-in for your organization. 
+- In Docker Hub, you can now download a [registry.json](/manuals/enterprise/security/enforce-sign-in/_index.md) file or copy the commands to create a registry.json file to enforce sign-in for your organization. ## 2022-09-19 ### Bug fixes and enhancements -- You can now [export a CSV file of members](../admin/organization//members.md#export-members) from organizations that you own. +- You can now [export a CSV file of members](../admin/organization/manage/members.md#export-members-csv-file) from organizations that you own. ## 2022-07-22 @@ -114,7 +131,7 @@ known issues for each Docker Hub release. ### New -- [Registry Access Management](/manuals/security/for-admins/hardened-desktop/registry-access-management.md) is now available for all Docker Business subscriptions. When enabled, your users can access specific registries in Docker Hub. +- [Registry Access Management](/manuals/enterprise/security/hardened-desktop/registry-access-management.md) is now available for all Docker Business subscriptions. When enabled, your users can access specific registries in Docker Hub. ## 2022-05-03 @@ -143,7 +160,7 @@ The updated [Docker Subscription Service Agreement](https://www.docker.com/legal - The existing Docker Free subscription has been renamed **Docker Personal**. - **No changes** to Docker Engine or any other upstream **open source** Docker or Moby project. - To understand how these changes affect you, read the [FAQs](https://www.docker.com/pricing/faq). For more information, see [Docker subscription overview](../subscription/_index.md). + To understand how these changes affect you, read the [FAQs](https://www.docker.com/pricing/faq). For more information, see [Docker subscription overview](../subscription/_index.md). ## 2021-05-05 @@ -183,7 +200,7 @@ Docker introduces the Advanced Image Management dashboard that enables you to vi Docker introduces Audit logs, a new feature that allows team owners to view a list of activities that occur at organization and repository levels. This feature begins tracking the activities from the release date, that is, **from 25 January 2021**. -For more information about this feature and for instructions on how to use it, see [Activity logs](../admin/organization/activity-logs.md). +For more information about this feature and for instructions on how to use it, see [Activity logs](../admin/activity-logs.md). ## 2020-11-10 @@ -201,98 +218,94 @@ Docker introduces Hub Vulnerability Scanning which enables you to automatically ### New features -* Docker has announced a new, per-seat pricing model to accelerate developer workflows for cloud-native development. The previous private repository/concurrent autobuild-based plans have been replaced with new **Pro** and **Team** plans that include unlimited private repositories. For more information, see [Docker subscription](../subscription/_index.md). +- Docker has announced a new, per-seat pricing model to accelerate developer workflows for cloud-native development. The previous private repository/concurrent autobuild-based plans have been replaced with new **Pro** and **Team** plans that include unlimited private repositories. For more information, see [Docker subscription](../subscription/_index.md). -* Docker has enabled download rate limits for downloads and pull requests on Docker Hub. This caps the number of objects that users can download within a specified timeframe. For more information, see [Usage and limits](/manuals/docker-hub/usage/_index.md). 
+- Docker has enabled download rate limits for downloads and pull requests on Docker Hub. This caps the number of objects that users can download within a specified timeframe. For more information, see [Usage and limits](/manuals/docker-hub/usage/_index.md). ## 2019-11-04 ### Enhancements -* The [repositories page](repos/_index.md) and all -related settings and tabs have been updated and moved from `cloud.docker.com` -to `hub.docker.com`. You can access the page at its new URL: [https://hub.docker.com/repositories](https://hub.docker.com/repositories). +- The [repositories page](repos/_index.md) and all + related settings and tabs have been updated and moved from `cloud.docker.com` + to `hub.docker.com`. You can access the page at its new URL: [https://hub.docker.com/repositories](https://hub.docker.com/repositories). ### Known Issues -* Scan results don't appear for some official images. +- Scan results don't appear for some official images. ## 2019-10-21 ### New features -* **Beta:** Docker Hub now supports two-factor authentication (2FA). Enable it in your account settings, under the **[Security](https://hub.docker.com/settings/security)** section. +- **Beta:** Docker Hub now supports two-factor authentication (2FA). Enable it in your account settings, under the **[Security](https://hub.docker.com/settings/security)** section. - > If you lose both your 2FA authentication device and recovery code, you may - > not be able to recover your account. + > If you lose both your 2FA authentication device and recovery code, you may + > not be able to recover your account. ### Enhancements -* As a security measure, when two-factor authentication is enabled, the Docker CLI requires a personal access token instead of a password to log in. +- As a security measure, when two-factor authentication is enabled, the Docker CLI requires a personal access token instead of a password to log in. ### Known Issues -* Scan results don't appear for some official images. - +- Scan results don't appear for some official images. ## 2019-10-02 ### Enhancements -* You can now manage teams and members straight from your [organization page](https://hub.docker.com/orgs). -Each organization page now breaks down into these tabs: - * **New:** Members - manage your members directly from this page (delete, - add, or open their teams) - * **New:** Teams - search by team or username, and open up any team page to - manage the team - * **New:** Invitees (conditional tab, only if an invite exists) - resend or - remove invitations from this tab - * Repositories - * Settings - * Billing +- You can now manage teams and members straight from your [organization page](https://hub.docker.com/orgs). + Each organization page now breaks down into these tabs: + - **New:** Members - manage your members directly from this page (delete, + add, or open their teams) + - **New:** Teams - search by team or username, and open up any team page to + manage the team + - **New:** Invitees (conditional tab, only if an invite exists) - resend or + remove invitations from this tab + - Repositories + - Settings + - Billing ### Bug fixes -* Fixed an issue where Kinematic could not connect and log in to Docker Hub. +- Fixed an issue where Kinematic could not connect and log in to Docker Hub. ### Known Issues -* Scan results don't appear for some official images. - +- Scan results don't appear for some official images. 
## 2019-09-19 ### New features -* You can now [create personal access tokens](/security/for-developers/access-tokens/) in Docker Hub and use them to authenticate from the Docker CLI. Find them in your account settings, under the new **[Security](https://hub.docker.com/settings/security)** section. +- You can now [create personal access tokens](/security/access-tokens/) in Docker Hub and use them to authenticate from the Docker CLI. Find them in your account settings, under the new **[Security](https://hub.docker.com/settings/security)** section. ### Known Issues -* Scan results don't appear for some official images. - +- Scan results don't appear for some official images. ## 2019-09-16 ### Enhancements -* The [billing page](../subscription/change.md) for personal accounts has been updated. You can access the page at its new URL: [https://hub.docker.com/billing/plan](https://hub.docker.com/billing/plan). +- The [billing page](../subscription/change.md) for personal accounts has been updated. You can access the page at its new URL: [https://hub.docker.com/billing/plan](https://hub.docker.com/billing/plan). ### Known Issues -* Scan results don't appear for some official images. - +- Scan results don't appear for some official images. ## 2019-09-05 ### Enhancements -* The `Tags` tab on an image page now provides additional information for each tag: - * A list of digests associated with the tag - * The architecture it was built on - * The OS - * The user who most recently updated an image for a specific tag -* The security scan summary for Docker Official Images has been updated. +- The `Tags` tab on an image page now provides additional information for each tag: + - A list of digests associated with the tag + - The architecture it was built on + - The OS + - The user who most recently updated an image for a specific tag +- The security scan summary for Docker Official Images has been updated. ### Known Issues -* Scan results don't appear for some official images. +- Scan results don't appear for some official images. diff --git a/content/manuals/docker-hub/repos/_index.md b/content/manuals/docker-hub/repos/_index.md index 7554387ac2f..a6dd3adeb86 100644 --- a/content/manuals/docker-hub/repos/_index.md +++ b/content/manuals/docker-hub/repos/_index.md @@ -54,6 +54,3 @@ In this section, learn how to: - [Archive](./archive.md) an outdated or unsupported repository. - [Delete](./delete.md) a repository. -- [Manage personal settings](./settings.md): For your account, you can set personal - settings for repositories, including default repository privacy and autobuild - notifications. diff --git a/content/manuals/docker-hub/repos/create.md b/content/manuals/docker-hub/repos/create.md index 8bbb8513b57..9e20df6710c 100644 --- a/content/manuals/docker-hub/repos/create.md +++ b/content/manuals/docker-hub/repos/create.md @@ -39,7 +39,7 @@ weight: 20 is only accessible to you and collaborators. In addition, if you selected an organization's namespace, then the repository is accessible to those with applicable roles or permissions. For more details, see [Roles and - permissions](../../security/for-admins/roles-and-permissions.md). + permissions](/manuals/enterprise/security/roles-and-permissions.md). 
> [!NOTE] > diff --git a/content/manuals/docker-hub/repos/manage/access.md b/content/manuals/docker-hub/repos/manage/access.md index 0adac4195c4..96a33fb7378 100644 --- a/content/manuals/docker-hub/repos/manage/access.md +++ b/content/manuals/docker-hub/repos/manage/access.md @@ -94,7 +94,7 @@ repository from that repository's **Settings** page. Organizations can use roles for individuals, giving them different permissions in the organization. For more details, see [Roles and -permissions](/manuals/security/for-admins/roles-and-permissions.md). +permissions](/manuals/enterprise/security/roles-and-permissions.md). ## Organization teams @@ -105,7 +105,7 @@ access. You must create a team before you are able to configure repository permissions. For more details, see [Create and manage a -team](/manuals/admin/organization/manage-a-team.md). +team](/manuals/admin/organization/manage/manage-a-team.md). To configure team repository permissions: @@ -131,7 +131,7 @@ To configure team repository permissions: Organizations can use OATs. OATs let you assign fine-grained repository access permissions to tokens. For more details, see [Organization access -tokens](/manuals/security/for-admins/access-tokens.md). +tokens](/manuals/enterprise/security/access-tokens.md). ## Gated distribution @@ -141,6 +141,8 @@ Gated distribution allows publishers to securely share private container images This feature is ideal for commercial software publishers who want to control who can pull specific images while preserving a clean separation between internal users and external consumers. +If you are interested in Gated Distribution contact the [Docker Sales Team](https://www.docker.com/pricing/contact-sales/) for more information. + ### Key features - **Private repository distribution**: Content is stored in private repositories and only accessible to explicitly invited users. @@ -154,7 +156,7 @@ This feature is ideal for commercial software publishers who want to control who ### Invite distributor members via API > [!NOTE] -> When you invite members, you assign them a role. See [Roles and permissions](/manuals/security/for-admins/roles-and-permissions.md) for details about the access permissions for each role. +> When you invite members, you assign them a role. See [Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md) for details about the access permissions for each role. Distributor members (used for gated distribution) can only be invited using the Docker Hub API. UI-based invitations are not currently supported for this role. To invite distributor members, use the Bulk create invites API endpoint. diff --git a/content/manuals/docker-hub/repos/manage/builds/_index.md b/content/manuals/docker-hub/repos/manage/builds/_index.md index 9ce20e697d1..db9dc183643 100644 --- a/content/manuals/docker-hub/repos/manage/builds/_index.md +++ b/content/manuals/docker-hub/repos/manage/builds/_index.md @@ -3,12 +3,21 @@ description: how automated builds work keywords: docker hub, automated builds title: Automated builds weight: 90 +params: + sidebar: + badge: + color: gray + text: Deprecated aliases: - /docker-hub/builds/how-builds-work/ --- {{< summary-bar feature_name="Automated builds" >}} +> [!WARNING] +> Docker Hub Automated Builds is a deprecated feature. +> It will be fully retired on April 1, 2027. + Docker Hub can automatically build images from source code in an external repository and automatically push the built image to your Docker repositories. @@ -30,7 +39,7 @@ pushing to the registry. 
You can use these tests to create a continuous integration workflow where a build that fails its tests doesn't push the built image. Automated tests don't push images to the registry on their own. [Learn about automated image testing](automated-testing.md). -Depending on your [subscription](https://www.docker.com/pricing), +Depending on your [subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsHubRepoBuilds), you may get concurrent builds, which means that `N` autobuilds can be run at the same time. `N` is configured according to your subscription. Once `N+1` builds are running, any additional builds go into a queue to be run later. diff --git a/content/manuals/docker-hub/repos/manage/builds/advanced.md b/content/manuals/docker-hub/repos/manage/builds/advanced.md index e34ef9deea2..5428e897af5 100644 --- a/content/manuals/docker-hub/repos/manage/builds/advanced.md +++ b/content/manuals/docker-hub/repos/manage/builds/advanced.md @@ -8,6 +8,11 @@ aliases: - /docker-hub/builds/advanced/ --- + +> [!WARNING] +> Docker Hub Automated Builds is a deprecated feature. +> It will be fully retired on April 1, 2027. + > [!NOTE] > > Automated builds require a @@ -119,7 +124,7 @@ $ docker build --build-arg CUSTOM=$VAR -f $DOCKERFILE_PATH -t $IMAGE_NAME . > A `hooks/build` file overrides the basic `docker build` command used by the builder, so you must include a similar build command in the hook or the automated build fails. -Refer to the [docker build documentation](/reference/cli/docker/buildx/build.md#build-arg) +Refer to the [docker build documentation](/reference/cli/docker/buildx/build/#build-arg) to learn more about Docker build-time variables. #### Push to multiple repositories diff --git a/content/manuals/docker-hub/repos/manage/builds/automated-testing.md b/content/manuals/docker-hub/repos/manage/builds/automated-testing.md index 79d381757cf..4dabaee0426 100644 --- a/content/manuals/docker-hub/repos/manage/builds/automated-testing.md +++ b/content/manuals/docker-hub/repos/manage/builds/automated-testing.md @@ -7,6 +7,10 @@ aliases: - /docker-hub/builds/automated-testing/ --- +> [!WARNING] +> Docker Hub Automated Builds is a deprecated feature. +> It will be fully retired on April 1, 2027. + > [!NOTE] > > Automated builds require a diff --git a/content/manuals/docker-hub/repos/manage/builds/images/autobuild-example.png b/content/manuals/docker-hub/repos/manage/builds/images/autobuild-example.png new file mode 100644 index 00000000000..e1ee9af139b Binary files /dev/null and b/content/manuals/docker-hub/repos/manage/builds/images/autobuild-example.png differ diff --git a/content/manuals/docker-hub/repos/manage/builds/link-source.md b/content/manuals/docker-hub/repos/manage/builds/link-source.md index 61e270190cc..2cabd76c0a3 100644 --- a/content/manuals/docker-hub/repos/manage/builds/link-source.md +++ b/content/manuals/docker-hub/repos/manage/builds/link-source.md @@ -11,6 +11,10 @@ aliases: - /docker-hub/builds/link-source/ --- +> [!WARNING] +> Docker Hub Automated Builds is a deprecated feature. +> It will be fully retired on April 1, 2027. + > [!NOTE] > > Automated builds require a Docker Pro, Team, or Business subscription. @@ -21,7 +25,7 @@ code service to Docker Hub so that it can access your source code repositories. You can configure this link for user accounts or organizations. 
-If you are linking a source code provider to create autobuilds for a team, follow the instructions to [create a service account](index.md#service-users-for-team-autobuilds) for the team before linking the account as described below. +If you are linking a source code provider to create autobuilds for a team, follow the instructions to [create a service account](setup.md#service-users-for-team-autobuilds) for the team before linking the account as described below. ## Link to a GitHub user account diff --git a/content/manuals/docker-hub/repos/manage/builds/manage-builds.md b/content/manuals/docker-hub/repos/manage/builds/manage-builds.md index d1951da0359..5bf33052290 100644 --- a/content/manuals/docker-hub/repos/manage/builds/manage-builds.md +++ b/content/manuals/docker-hub/repos/manage/builds/manage-builds.md @@ -6,6 +6,10 @@ aliases: - /docker-hub/builds/manage-builds/ --- +> [!WARNING] +> Docker Hub Automated Builds is a deprecated feature. +> It will be fully retired on April 1, 2027. + > [!NOTE] > > Automated builds require a Docker Pro, Team, or Business subscription. diff --git a/content/manuals/docker-hub/repos/manage/builds/migrate.md b/content/manuals/docker-hub/repos/manage/builds/migrate.md new file mode 100644 index 00000000000..c811015fc5b --- /dev/null +++ b/content/manuals/docker-hub/repos/manage/builds/migrate.md @@ -0,0 +1,192 @@ +--- +description: Migrate from Autobuilds to CI/CD workflows +keywords: automated builds, autobuilds, migration, github actions, bitbucket pipelines +title: Migrate from Autobuilds +linkTitle: Migrate +weight: 80 +--- + +> [!WARNING] +> Docker Hub Automated Builds is a deprecated feature. +> It will be fully retired on April 1, 2027. + +This guide explains how to migrate your Docker Hub Autobuilds setup to +Continuous Integration (CI) workflows, focusing on GitHub Actions and Bitbucket +Pipelines as these are the built-in CI services for the two version control +services supported via Autobuilds. + +## Step 1: Create access tokens + +To grant your CI workflows the ability to pull and push images to and from +Docker Hub, you first need to create access tokens: + +- For a personal repository: Create a [Personal + Access Token](../../../../security/access-tokens.md) with **Read & Write** + permissions. + +- For an organization repository: Create an [Organization Access + Token](../../../../enterprise/security/access-tokens.md) with the following + permissions: + - **Read public repositories** + - **Image Pull** on any private repositories that the build needs to pull from + - **Image Push** on the repository that the built image will be pushed to + +The same token can be used for all CI workflows under the account's namespace +provided it has adequate permissions to all relevant Docker Hub repositories. + +Store the token securely in a password manager or your CI/CD platform's secrets +manager. Never commit tokens to source code repositories. + +## Step 2: Extract your Autobuilds configuration + +For each Docker Hub repository currently configured to use Autobuilds, you need +to extract its configuration to set up your CI workflows to duplicate the +existing functionality. The only way to extract the configuration is via the +Docker Hub web interface. + +1. Sign in to [Docker Hub](https://hub.docker.com). + +2. Navigate to your repository by going to **My Hub** > ***Your namespace*** > + **Repositories** > ***Your Repository***. + +3. Go to the **Builds** tab and select **Configure automated builds**. 
+ + If there is no existing build configuration, then this repository is not + configured for Autobuilds. + +4. Note the following configuration details: + + - **Source Repository**: The GitHub or Bitbucket repository. The organization + is the namespace and the repository is the repository name. This is where + you need to add your workflow. + + - **Autotest**: If Autotest is enabled for Pull Requests (either internal + only or internal and external), then extra steps are needed in your + workflow to run the Autotest step. + + - **Repository Links**: Not supported and can be ignored. If chain builds are + required, see the documentation for your CI service on how to chain builds + together. + + - **Build Rules**: Specify the triggers, tags, and paths of your builds. + Ignore any entry where **Autobuild** is toggled off. + + - **Build Environment Variables**: User-defined variables injected as + environment variables into your build. You need to add these to your + workflow. If the environment variables contain secrets, add them to your CI + service's secrets manager. Then update your Dockerfile or build scripts to + reference these secrets using your CI platform's syntax. See your CI + service documentation on how to handle secrets. + +### Example configuration + +The following image shows an example Autobuilds configuration. + +![Example Autobuilds configuration](./images/autobuild-example.png) + +Based on the pictured example, you would note the following items for this +Autobuilds configuration: + +- Source code repository: GitHub repository `docker/docker-rust-hello` +- Autotest: Disabled +- Build rule 1: Build and push the image with tag `latest` when a new commit to + the `main` branch is detected. The Dockerfile is at `./Dockerfile` and the + build context is the root of the cloned code. +- Build rule 2: Build and push the image with tag `v{\1}` when a new commit + to a tag matching the regex `^v([0-9.]+)$` is detected. The Dockerfile is at + `./Dockerfile` and the build context is the root of the cloned code. +- Environment variable: Key `ENV_KEY` with value `ENV_VALUE` + +## Step 3: Migrate to your CI/CD platform + +Select the tab that matches your source code repository hosting platform. + +{{< tabs >}} +{{< tab name="GitHub Actions" >}} + +If your source code repository is hosted on GitHub, see the [Docker +Autobuilds example repository](https://github.com/docker/autobuilds-actions). + +All files except those under the `.github/workflows` directory are for example +purposes only. + +The repository's readme details how to migrate from Autobuilds to GitHub Actions +using one of the two provided workflows: + +- The `simple-build` workflow builds and pushes a Docker image to your Docker + Hub repository. +- The `full-autobuilds` workflow contains all the steps commonly used within an + Autobuilds run, including building, tagging, running Docker Compose tests, and + running optional bash hook files. + +### Steps to migrate + +1. Follow the instructions in the [example repository + readme](https://github.com/docker/autobuilds-actions) to configure a CI GitHub + Action workflow in your GitHub repository. + +2. The workflows contain comments on what each step does and where changes + should be made. 
Important changes to make include: + + - Set the `DOCKER_REPOSITORY_NAME` environment variable to the full name of your Docker Hub repository + - Set your image tagging policy + - Set the workflow triggers + + Links to relevant documentation are provided in the readme and the workflow comments. + +3. After you have completed migrating to GitHub Actions, delete the build + configuration from your Docker Hub repository: + + 1. Navigate to the repository's **Builds** tab. + + 2. Select **Configure automated builds**. + + 3. Select **Delete Build Configuration**. + +{{< /tab >}} +{{< tab name="Bitbucket Pipelines" >}} + +If your source code repository is hosted on Bitbucket, see the [Docker +Autobuilds Bitbucket example +repository](https://bitbucket.org/docker-io/autobuilds-pipeline). + +All files except the `bitbucket-pipelines.yml` file are for example purposes only. + +The repository's readme details how to migrate from Autobuilds to Bitbucket +Pipelines using the provided example `bitbucket-pipelines.yml` configuration +file. + +The pipeline example contains three separate pipelines: + +- `branches/main`: Shows how to build, test, and push an image on changes to a specific branch +- `tags/*`: Shows how to build, test, and push an image on tag pushes, including + tagging the image the same as the Git tag +- `pull-requests/*`: Shows how to build and test, but not push, an image from a pull request + +### Steps to migrate + +1. Follow the instructions in the [example repository + readme](https://bitbucket.org/docker-io/autobuilds-pipeline) to configure + a Bitbucket Pipeline in your Bitbucket repository. + +2. Comments in the pipeline configuration explain what each part does and where + changes need to be made. Important changes to make include: + + - Set the `DOCKER_REPOSITORY_NAME` environment variable to the full name of + your Docker Hub repository + - Set your image tagging policy (see where the `DOCKER_TAG` variable is set in each pipeline) + - Set the pipeline triggers for branches, tags, and/or pull-requests + + Links to relevant documentation are provided in the readme and the workflow comments. + +3. After you have completed migrating to Bitbucket Pipelines, delete the Build + configuration from your Docker Hub repository: + + 1. Navigate to the repository's **Builds** tab. + + 2. Select **Configure automated builds**. + + 3. Select **Delete Build Configuration**. + +{{< /tab >}} +{{< /tabs >}} diff --git a/content/manuals/docker-hub/repos/manage/builds/setup.md b/content/manuals/docker-hub/repos/manage/builds/setup.md index 38a97f360eb..667480e2a64 100644 --- a/content/manuals/docker-hub/repos/manage/builds/setup.md +++ b/content/manuals/docker-hub/repos/manage/builds/setup.md @@ -10,6 +10,10 @@ aliases: - /docker-hub/builds/ --- +> [!WARNING] +> Docker Hub Automated Builds is a deprecated feature. +> It will be fully retired on April 1, 2027. + > [!NOTE] > > Automated builds require a @@ -30,8 +34,8 @@ when the tests succeed. > [!NOTE] > - > You may be redirected to the settings page to [link](link-source.md) the - > code repository service. Otherwise, if you are editing the build settings + > You may be redirected to the settings page to [link the code repository + > service](link-source.md). Otherwise, if you are editing the build settings > for an existing automated build, select **Configure automated builds**. 4. Select the **source repository** to build the Docker images from. @@ -154,7 +158,7 @@ each repository. 
In [Docker Hub](https://hub.docker.com), select **My Hub** > ** ### Tag and branch builds -You can configure your automated builds so that pushes to specific branches or tags triggers a build. +You can configure your automated builds so that pushes to specific branches or tags trigger a build. 1. In the **Build Rules** section, select the **plus** icon to add more sources to build. @@ -170,7 +174,7 @@ You can configure your automated builds so that pushes to specific branches or t > [!NOTE] > > You can enter a name, or use a regex to match which source branch or tag - > names to build. To learn more, see [regexes](index.md#regexes-and-automated-builds). + > names to build. To learn more, see [regexes](#regexes-and-automated-builds). 4. Enter the tag to apply to Docker images built from this source. @@ -178,7 +182,7 @@ You can configure your automated builds so that pushes to specific branches or t > > If you configured a regex to select the source, you can reference the > capture groups and use its result as part of the tag. To learn more, see - > [regexes](index.md#regexes-and-automated-builds). + > [regexes](#regexes-and-automated-builds). 5. Repeat steps 2 through 4 for each new build rule you set up. diff --git a/content/manuals/docker-hub/repos/manage/builds/troubleshoot.md b/content/manuals/docker-hub/repos/manage/builds/troubleshoot.md index 917ae4a1721..45774fe5d87 100644 --- a/content/manuals/docker-hub/repos/manage/builds/troubleshoot.md +++ b/content/manuals/docker-hub/repos/manage/builds/troubleshoot.md @@ -8,6 +8,10 @@ aliases: - /docker-hub/builds/troubleshoot/ --- +> [!WARNING] +> Docker Hub Automated Builds is a deprecated feature. +> It will be fully retired on April 1, 2027. + > [!NOTE] > > Automated builds require a @@ -61,7 +65,7 @@ system access to the repositories. This step is optional, but allows you to revoke the build-only keypair without removing other access. 2. Copy the private half of the keypair to your clipboard. -3. In Docker Hub, navigate to the build page for the repository that has linked private submodules. (If necessary, follow the steps [here](index.md#configure-automated-builds) to configure the automated build.) +3. In Docker Hub, navigate to the build page for the repository that has linked private submodules. (If necessary, [follow the steps here](index.md#configure-automated-builds) to configure the automated build.) 4. At the bottom of the screen, select the **plus** icon next to **Build Environment variables**. 5. Enter `SSH_PRIVATE` as the name for the new environment variable. 6. Paste the private half of the keypair into the **Value** field. diff --git a/content/manuals/docker-hub/repos/manage/export.md b/content/manuals/docker-hub/repos/manage/export.md new file mode 100644 index 00000000000..0dea2fe45de --- /dev/null +++ b/content/manuals/docker-hub/repos/manage/export.md @@ -0,0 +1,157 @@ +--- +title: Export organization repositories to CSV +linkTitle: Export repositories +description: Learn how to export a complete list of your organization's Docker Hub repositories using the API. +keywords: docker hub, organization, repositories, export, csv, api, personal access token, pat +--- + +This guide shows you how to export a complete list of repositories from your +Docker Hub organization, including private repositories. You'll use a +Personal Access Token (PAT) from an administrator account to authenticate with +the Docker Hub API and export repository details to a CSV file for reporting or +analysis. 
+ +The exported data includes repository name, visibility status, last updated +date, pull count, and star count. + +## Prerequisites + +Before you begin, ensure you have: + +- Administrator access to a Docker Hub organization +- `curl` installed for making API requests +- `jq` installed for JSON parsing +- A spreadsheet application to view the CSV + +## Create a personal access token + +[Create a personal access token](/security/access-tokens/) from +a user account that has access to the organization's repositories. When creating +the token, select at minimum **Read-only** access permissions to list +repositories. + +> [!IMPORTANT] +> +> Use a PAT from a user account that is a member of the organization. Users +> with owner roles can export all organization repositories. Members can only +> export repositories they have permission to access. + +## Authenticate with the Docker Hub API + +Exchange your personal access token for a JWT bearer token that you'll use +for subsequent API requests. + +1. Set your Docker Hub username, organization name, and personal access token as variables: + + ```bash + USERNAME="" + ORG="" + PAT="" + ``` + +2. Call the authentication endpoint to get a JWT: + + ```bash + TOKEN=$( + curl -s https://hub.docker.com/v2/auth/token \ + -H 'Content-Type: application/json' \ + -d "{\"identifier\":\"$USERNAME\",\"secret\":\"$PAT\"}" \ + | jq -r '.access_token' + ) + ``` + +3. Verify the token was retrieved successfully: + + ```console + $ echo "Got JWT: ${#TOKEN} chars" + ``` + +You'll use this JWT as a Bearer token in the `Authorization` header for all +subsequent API calls. + +## Retrieve all repositories + +The Docker Hub API paginates repository lists. This script retrieves all pages +and combines the results. + +1. Set the page size and initial API endpoint: + + ```bash + PAGE_SIZE=100 + URL="https://hub.docker.com/v2/namespaces/$ORG/repositories?page_size=$PAGE_SIZE" + ``` + +2. Paginate through all results: + + ```bash + ALL=$( + while [ -n "$URL" ] && [ "$URL" != "null" ]; do + RESP=$(curl -s "$URL" -H "Authorization: Bearer $TOKEN") + echo "$RESP" | jq -c '.results[]' + URL=$(echo "$RESP" | jq -r '.next') + done | jq -s '.' + ) + ``` + +3. Verify the number of repositories retrieved: + + ```console + $ echo "$ALL" | jq 'length' + ``` + +The script continues requesting the `next` URL from each response until +pagination is complete. + +## Export to CSV + +Generate a CSV file with repository details that you can open in +spreadsheet applications. + +Run the following command to create `repos.csv`: + +```bash +echo "$ALL" | jq -r ' + (["namespace","name","is_private","last_updated","pull_count","star_count"] | @csv), + (.[] | [ + .namespace, .name, .is_private, .last_updated, (.pull_count//0), (.star_count//0) + ] | @csv) +' > repos.csv +``` + +Verify the export completed: + +```console +$ echo "Rows:" $(wc -l < repos.csv) +``` + +Open the `repos.csv` file in your preferred +spreadsheet application to view and analyze your repository data. + +## Troubleshooting + +### Only public repositories appear + +The Docker Hub account associated with your personal access token may not have +access to private repositories in the organization. + +To fix this: + +1. Verify the account is a member of the organization +2. Check that the account has appropriate permissions (owner or member role) +3. Ensure the personal access token has sufficient access permissions +4. 
Regenerate the JWT and retry the export + +### API returns 403 or missing fields + +Ensure you're using the JWT from the `/v2/auth/token` endpoint as a +Bearer token in the `Authorization` header, not the personal access +token directly. + +Verify your authentication: + +```console +$ curl -s "https://hub.docker.com/v2/namespaces/$ORG/repositories?page_size=1" \ + -H "Authorization: Bearer $TOKEN" | jq +``` + +If this returns an error, re-run the authentication step to get a fresh JWT. diff --git a/content/manuals/docker-hub/repos/manage/hub-images/bulk-migrate.md b/content/manuals/docker-hub/repos/manage/hub-images/bulk-migrate.md new file mode 100644 index 00000000000..b2523ea78f7 --- /dev/null +++ b/content/manuals/docker-hub/repos/manage/hub-images/bulk-migrate.md @@ -0,0 +1,333 @@ +--- +title: Bulk migrate images +description: Learn how to migrate multiple Docker images and tags between organizations using scripts and automation. +keywords: docker hub, migration, bulk, images, tags, multi-arch +--- + +This guide shows you how to migrate Docker images in bulk between Docker Hub +organizations or namespaces. Whether you're consolidating repositories, changing +organization structure, or moving images to a new account, these techniques help +you migrate efficiently while preserving image integrity. + +The topic is structured to build up in scale: + +1. [Migrate a single image tag](#migrate-a-single-image-tag) +2. [Migrate all tags for a repository](#migrate-all-tags-for-a-repository) +3. [Migrate multiple repositories](#migrate-multiple-repositories) + +The recommended tool for this workflow is `crane`. An equivalent alternative +using `regctl` is also shown. Both tools perform registry-to-registry copies +without pulling images locally and preserve multi-architecture images. + +`crane` is recommended for its simplicity and focused image-copying workflow. +`regctl` is also a good choice, particularly if you already use it for broader +registry management tasks beyond image copying. + +> [!NOTE] +> +> The main workflows in this topic operate on tagged images only. Untagged +> manifests or content no longer reachable from tags are not migrated. In +> practice, these are usually unused artifacts, but be aware of this limitation +> before migration. While you can migrate specific untagged manifests using +> [digest references](#migrate-by-digest), there is no API to enumerate untagged +> manifests in a repository. + +## Prerequisites + +Before you begin, ensure you have: + +- One of the following installed and available in your `$PATH`: + - [`crane`](https://github.com/google/go-containerregistry) + - [`regctl`](https://regclient.org/usage/regctl/) +- Push access to both the source and destination organizations +- Registry authentication configured for your chosen tool + +## Authenticate to registries + +Both tools authenticate directly against registries: + +- `crane` uses Docker credential helpers and `~/.docker/config.json`. See the + [crane documentation](https://github.com/google/go-containerregistry/tree/main/cmd/crane/doc). +- `regctl` uses its own configuration file and can import Docker credentials. + See the [regctl documentation](https://github.com/regclient/regclient/tree/main/docs). + +Follow the authentication instructions for your registry and tool of choice. + +## Migrate a single image tag + +This is the simplest and most common migration scenario. 
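+
+Before running a copy, confirm that the tool you chose can sign in to both the
+source and destination namespaces. The following is a minimal sketch with
+placeholder credentials; exact flags can vary by tool version, and a plain
+`docker login` is often sufficient for `crane` because it reads
+`~/.docker/config.json`:
+
+```console
+$ crane auth login registry-1.docker.io -u my-user -p my-access-token
+$ regctl registry login registry-1.docker.io
+```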
+ +The following example script copies the image manifest directly between +registries and preserves multi-architecture images when present. Repeat this +process for each tag you want to migrate. Replace the environment variable +values with your source and destination organization names, repository name, and +tag. + +```bash +#!/usr/bin/env bash +set -euo pipefail + +SRC_ORG="oldorg" +DEST_ORG="neworg" +REPO="myapp" +TAG="1.2.3" + +SRC_IMAGE="${SRC_ORG}/${REPO}:${TAG}" +DEST_IMAGE="${DEST_ORG}/${REPO}:${TAG}" + +# Using crane (recommended) +crane cp "${SRC_IMAGE}" "${DEST_IMAGE}" + +# Using regctl (alternative) +# regctl image copy "${SRC_IMAGE}" "${DEST_IMAGE}" +``` + +### Migrate by digest + +To migrate a specific image by digest instead of tag, use the digest in the +source reference. This is useful when you need to migrate an exact image +version, even if the tag has been updated. Replace the environment variable +values with your source and destination organization names, repository name, +digest, and tag. You can choose between `crane` and `regctl` for the copy +operation. + +```bash +#!/usr/bin/env bash +set -euo pipefail + +SRC_ORG="oldorg" +DEST_ORG="neworg" +REPO="myapp" +DIGEST="sha256:abcd1234..." +TAG="stable" + +SRC_IMAGE="${SRC_ORG}/${REPO}@${DIGEST}" +DEST_IMAGE="${DEST_ORG}/${REPO}:${TAG}" + +# Using crane +crane cp "${SRC_IMAGE}" "${DEST_IMAGE}" + +# Using regctl +# regctl image copy "${SRC_IMAGE}" "${DEST_IMAGE}" +``` + +## Migrate all tags for a repository + +To migrate every tagged image in a repository, use the Docker Hub API to +enumerate tags and copy each one. The following example script retrieves all +tags for a given repository and migrates them in a loop. This approach scales to +repositories with many tags without overwhelming local resources. Note that +there is a rate limit on Docker Hub requests, so you may need to add delays or +pagination handling for large repositories. + +Replace the environment variable values with your source and destination +organization names and repository name. If your source repository is private, +also set `HUB_USER` and `HUB_TOKEN` with credentials that have pull access. You +can also choose between `crane` and `regctl` for the copy operation. + +```bash +#!/usr/bin/env bash +set -euo pipefail + +# Use environment variables if set, otherwise use defaults +SRC_ORG="${SRC_ORG:-oldorg}" +DEST_ORG="${DEST_ORG:-neworg}" +REPO="${REPO:-myapp}" + +# Optional: for private repositories +# HUB_USER="your-username" +# HUB_TOKEN="your-access-token" +# AUTH="-u ${HUB_USER}:${HUB_TOKEN}" +AUTH="" + +TOOL="crane" # or: TOOL="regctl" + +TAGS_URL="https://hub.docker.com/v2/repositories/${SRC_ORG}/${REPO}/tags?page_size=100" + +while [[ -n "${TAGS_URL}" && "${TAGS_URL}" != "null" ]]; do + RESP=$(curl -fsSL ${AUTH} "${TAGS_URL}") + + echo "${RESP}" | jq -r '.results[].name' | while read -r TAG; do + [[ -z "${TAG}" ]] && continue + + SRC_IMAGE="${SRC_ORG}/${REPO}:${TAG}" + DEST_IMAGE="${DEST_ORG}/${REPO}:${TAG}" + + echo "Migrating ${SRC_IMAGE} → ${DEST_IMAGE}" + + case "${TOOL}" in + crane) + crane cp "${SRC_IMAGE}" "${DEST_IMAGE}" + ;; + regctl) + regctl image copy "${SRC_IMAGE}" "${DEST_IMAGE}" + ;; + esac + done + + TAGS_URL=$(echo "${RESP}" | jq -r '.next') +done +``` + +> [!NOTE] +> +> Docker Hub automatically creates the destination repository on first push if +> your account has permission. + +## Migrate multiple repositories + +To migrate several repositories, create a list and run the single-repository +script for each one. 
+ +For example, create a `repos.txt` file with repository names: + +```text +api +web +worker +``` + +Save the script from the previous section as `migrate-single-repo.sh`. Then, run +the following example script that processes each repository in the file. Replace +the environment variable values with your source and destination organization +names. + +```bash +#!/usr/bin/env bash +set -euo pipefail + +SRC_ORG="oldorg" +DEST_ORG="neworg" + +while read -r REPO; do + [[ -z "${REPO}" ]] && continue + echo "==== Migrating repo: ${REPO}" + SRC_ORG="${SRC_ORG}" DEST_ORG="${DEST_ORG}" REPO="${REPO}" ./migrate-single-repo.sh +done < repos.txt +``` + +## Verify migration integrity + +After copying, verify that source and destination match by comparing digests. + +### Basic digest verification + +The following example script retrieves the image digest for a specific tag from +both source and destination and compares them. If the digests match, the +migration is successful. Replace the environment variable values with your +source and destination organization names, repository name, and tag. You can +choose between `crane` and `regctl` for retrieving digests. + +```bash +#!/usr/bin/env bash +set -euo pipefail + +SRC_ORG="oldorg" +DEST_ORG="neworg" +REPO="myapp" +TAG="1.2.3" + +SRC_IMAGE="${SRC_ORG}/${REPO}:${TAG}" +DEST_IMAGE="${DEST_ORG}/${REPO}:${TAG}" + +# Using crane +SRC_DIGEST=$(crane digest "${SRC_IMAGE}") +DEST_DIGEST=$(crane digest "${DEST_IMAGE}") + +# Using regctl (alternative) +# SRC_DIGEST=$(regctl image digest "${SRC_IMAGE}") +# DEST_DIGEST=$(regctl image digest "${DEST_IMAGE}") + +echo "Source: ${SRC_DIGEST}" +echo "Destination: ${DEST_DIGEST}" + +if [[ "${SRC_DIGEST}" == "${DEST_DIGEST}" ]]; then + echo "✓ Migration verified: digests match" +else + echo "✗ Migration failed: digests do not match" + exit 1 +fi +``` + +### Multi-arch verification + +For multi-architecture images, also verify the manifest list to ensure all +platforms were copied correctly. Replace the environment variable values with +your source and destination organization names, repository name, and tag. You +can choose between `crane` and `regctl` for retrieving manifests. + +```bash +#!/usr/bin/env bash +set -euo pipefail + +SRC_ORG="oldorg" +DEST_ORG="neworg" +REPO="myapp" +TAG="1.2.3" + +SRC_IMAGE="${SRC_ORG}/${REPO}:${TAG}" +DEST_IMAGE="${DEST_ORG}/${REPO}:${TAG}" + +# Using crane +SRC_MANIFEST=$(crane manifest "${SRC_IMAGE}") +DEST_MANIFEST=$(crane manifest "${DEST_IMAGE}") + +# Using regctl (alternative) +# SRC_MANIFEST=$(regctl image manifest --format raw-body "${SRC_IMAGE}") +# DEST_MANIFEST=$(regctl image manifest --format raw-body "${DEST_IMAGE}") + +# Check if it's a manifest list (multi-arch) +if echo "${SRC_MANIFEST}" | jq -e '.manifests' > /dev/null 2>&1; then + echo "Multi-arch image detected" + + # Compare platform list + SRC_PLATFORMS=$(echo "${SRC_MANIFEST}" | jq -r '.manifests[] | "\(.platform.os)/\(.platform.architecture)"' | sort) + DEST_PLATFORMS=$(echo "${DEST_MANIFEST}" | jq -r '.manifests[] | "\(.platform.os)/\(.platform.architecture)"' | sort) + + if [[ "${SRC_PLATFORMS}" == "${DEST_PLATFORMS}" ]]; then + echo "✓ Platform list matches:" + echo "${SRC_PLATFORMS}" + else + echo "✗ Platform lists do not match" + echo "Source platforms:" + echo "${SRC_PLATFORMS}" + echo "Destination platforms:" + echo "${DEST_PLATFORMS}" + exit 1 + fi +else + echo "Single-arch image" +fi +``` + +## Complete the migration + +After migrating your images, complete these additional steps: + +1. 
Copy repository metadata in the Docker Hub UI or via API: + + - README content + - Repository description + - Topics and tags + +2. Configure repository settings to match the source: + + - Visibility (public or private) + - Team permissions and access controls + +3. Reconfigure integrations in the destination organization: + + - Webhooks + - Automated builds + - Security scanners + +4. Update image references in your projects: + + - Change `FROM oldorg/repo:tag` to `FROM neworg/repo:tag` in Dockerfiles + - Update deployment configurations + - Update documentation + +5. Deprecate the old location: + - Update the source repository description to point to the new location + - Consider adding a grace period before making the old repository private or + read-only \ No newline at end of file diff --git a/content/manuals/docker-hub/repos/manage/hub-images/immutable-tags.md b/content/manuals/docker-hub/repos/manage/hub-images/immutable-tags.md index b0ae4abee53..d01fee36c01 100644 --- a/content/manuals/docker-hub/repos/manage/hub-images/immutable-tags.md +++ b/content/manuals/docker-hub/repos/manage/hub-images/immutable-tags.md @@ -25,15 +25,20 @@ To enable immutable tags for your repository: 1. Sign in to [Docker Hub](https://hub.docker.com). 2. Select **My Hub** > **Repositories**. 3. Select the repository where you want to enable immutable tags. -4. Select the **Settings** tab -5. Under **Tag mutability settings**, select **Immutable**. +4. Go to **Settings** > **General**. +5. Under **Tag mutability settings**, select one of the following options: + - **All tags are mutable (Default)**: + Tags can be changed to reference a different image. This lets you retarget a tag without creating a new one. + - **All tags are immutable**: + Tags cannot be updated to point to a different image after creation. This ensures consistency and prevents accidental changes. This includes the `latest` tag. + - **Specific tags are immutable**: + Define specific tags that cannot be updated after creation using regex values. 6. Select **Save**. Once enabled, all tags are locked to their specific images, ensuring that each tag always points to the same image version and cannot be modified. - > [!NOTE] -> -> All tags in the repository become immutable, including the `latest` tag. +> [!NOTE] +> This implementation of regular expressions follows the [Go regexp package](https://pkg.go.dev/regexp), which is based on the RE2 engine. For more information, visit [RE2 Regular Expression Syntax](https://github.com/google/re2/wiki/Syntax). ## Working with immutable tags @@ -48,3 +53,8 @@ To push an image, create a new tag for your updated image and push it to the rep + + + + + diff --git a/content/manuals/docker-hub/repos/manage/hub-images/manage.md b/content/manuals/docker-hub/repos/manage/hub-images/manage.md index 7af6b2f21d9..b0744339dcf 100644 --- a/content/manuals/docker-hub/repos/manage/hub-images/manage.md +++ b/content/manuals/docker-hub/repos/manage/hub-images/manage.md @@ -26,6 +26,10 @@ The following objects are shown in the diagram. ## Manage repository images and image indexes +Use the following steps to delete one or more items via the graphical user +interface. To delete in bulk, see the [deletion API +endpoint](/reference/api/registry/latest/#tag/delete). + 1. Sign in to [Docker Hub](https://hub.docker.com). 2. Select **My Hub** > **Repositories**. 3. In the list, select a repository. @@ -49,4 +53,11 @@ The following objects are shown in the diagram. 2. Select **Preview and delete**. 3. 
In the window that appears, verify the items that will be deleted and the amount of storage you will reclaim. - 4. Select **Delete forever**. \ No newline at end of file + 4. Select **Delete forever**. + + > [!NOTE] + > + > Deletion operations may take some time to complete. Timeout errors may + > occur during the deletion process. The system automatically retries the + > deletion in the background, and the items will be removed without requiring + > any action from you. \ No newline at end of file diff --git a/content/manuals/docker-hub/repos/manage/hub-images/move.md b/content/manuals/docker-hub/repos/manage/hub-images/move.md index aa1ebd6273b..aaebb824ad5 100644 --- a/content/manuals/docker-hub/repos/manage/hub-images/move.md +++ b/content/manuals/docker-hub/repos/manage/hub-images/move.md @@ -12,11 +12,16 @@ contributing to an organization. This topic explains how to move images between Docker Hub repositories, ensuring that your content remains accessible and organized under the correct accounts or namespaces. +> [!NOTE] +> +> For bulk migrations, multi-arch images, or scripted workflows, see [Bulk +> migrate Docker images](/manuals/docker-hub/repos/manage/hub-images/bulk-migrate.md). + ## Personal to personal When consolidating personal repositories, you can pull private images from the initial repository and push them into another repository owned by you. To avoid losing your private images, perform the following steps: -1. [Sign up](https://app.docker.com/signup) for a new Docker account with a personal subscription. +1. [Sign up](https://app.docker.com/signup) for a new Docker account with a personal subscription. (Be sure to verify your account after you've signed up.) 2. Sign in to [Docker](https://app.docker.com/login) using your original Docker account 3. Pull your images: @@ -29,6 +34,7 @@ When consolidating personal repositories, you can pull private images from the i ```console $ docker tag namespace1/docker101tutorial new_namespace/docker101tutorial ``` + 5. Using `docker login` from the CLI, sign in with your newly created Docker account, and push your newly tagged private images to your new Docker account namespace: ```console @@ -49,15 +55,17 @@ personal account and push them to an organization that's owned by you. ```console $ docker pull namespace1/docker101tutorial ``` + 4. Tag your images with your new organization namespace: ```console $ docker tag namespace1/docker101tutorial /docker101tutorial ``` + 5. Push your newly tagged images to your new org namespace: ```console $ docker push new_org/docker101tutorial ``` -The private images that existed in your user account are now available for your organization. \ No newline at end of file +The private images that existed in your user account are now available for your organization. diff --git a/content/manuals/docker-hub/repos/manage/hub-images/oci-artifacts.md b/content/manuals/docker-hub/repos/manage/hub-images/oci-artifacts.md index bb7b786cf20..d8cd90ef685 100644 --- a/content/manuals/docker-hub/repos/manage/hub-images/oci-artifacts.md +++ b/content/manuals/docker-hub/repos/manage/hub-images/oci-artifacts.md @@ -78,23 +78,24 @@ Steps: ```console $ helm package demo - Successfully packaged chart and saved it to: /Users/hubuser/demo-0.1.0.tgz + Successfully packaged chart and saved it to: demo-0.1.0.tgz ``` 3. Sign in to Docker Hub with Helm, using your Docker credentials. ```console - $ helm registry login registry-1.docker.io -u hubuser + $ helm registry login registry-1.docker.io -u ``` 4. 
Push the chart to a Docker Hub repository. ```console - $ helm push demo-0.1.0.tgz oci://registry-1.docker.io/docker + $ helm push demo-0.1.0.tgz oci://registry-1.docker.io/ ``` - This uploads the Helm chart tarball to a `demo` repository in the `docker` - namespace. + This uploads the Helm chart tarball to a `demo` repository in the `` + namespace. Running this command creates a `/demo` repository + if one does not already exist. 5. Go to the repository page on Docker Hub. The **Tags** section of the page shows the Helm chart tag. @@ -127,18 +128,18 @@ Steps: 2. Sign in to Docker Hub using the ORAS CLI. ```console - $ oras login -u hubuser registry-1.docker.io + $ oras login -u registry-1.docker.io ``` 3. Push the file to Docker Hub. ```console - $ oras push registry-1.docker.io/docker/demo:0.0.1 \ + $ oras push registry-1.docker.io//demo:0.0.1 \ --artifact-type=application/vnd.docker.volume.v1+tar.gz \ myvolume.txt:text/plain ``` - This uploads the volume to a `demo` repository in the `docker` namespace. The + This uploads the volume to a `demo` repository in the `` namespace. The `--artifact-type` flag specifies a special media type that makes Docker Hub recognize the artifact as a container volume. @@ -166,13 +167,13 @@ Steps: 2. Sign in to Docker Hub using the ORAS CLI. ```console - $ oras login -u hubuser registry-1.docker.io + $ oras login -u registry-1.docker.io ``` 3. Push the file to Docker Hub. ```console - $ oras push registry-1.docker.io/docker/demo:0.0.1 myartifact.txt:text/plain + $ oras push registry-1.docker.io//demo:0.0.1 myartifact.txt:text/plain ``` 4. Go to the repository page on Docker Hub. The **Tags** section on that page diff --git a/content/manuals/docker-hub/repos/manage/trusted-content/_index.md b/content/manuals/docker-hub/repos/manage/trusted-content/_index.md index 0ae04840e94..4a00bc2931a 100644 --- a/content/manuals/docker-hub/repos/manage/trusted-content/_index.md +++ b/content/manuals/docker-hub/repos/manage/trusted-content/_index.md @@ -29,4 +29,8 @@ In this section, learn about: through vulnerability analysis. - [Insights and analytics](./insights-analytics.md): Access detailed metrics on image and extension usage, including pull counts, geolocation, and client - data, to understand user behavior and optimize your content. \ No newline at end of file + data, to understand user behavior and optimize your content. + +For Docker Hardened Images guidance, including how to contribute, see +[Contribute to the catalog](/dhi/how-to/build/#contribute-to-the-catalog) in the +dedicated DHI docs section. diff --git a/content/manuals/docker-hub/repos/manage/trusted-content/dsos-program.md b/content/manuals/docker-hub/repos/manage/trusted-content/dsos-program.md index ee404d7db33..9b68d9b785e 100644 --- a/content/manuals/docker-hub/repos/manage/trusted-content/dsos-program.md +++ b/content/manuals/docker-hub/repos/manage/trusted-content/dsos-program.md @@ -7,7 +7,7 @@ aliases: - /trusted-content/dsos-program/ --- -[Docker-Sponsored Open Source images](https://hub.docker.com/search?q=&image_filter=open_source) are published and maintained by open-source projects sponsored by Docker through the program. +[Docker-Sponsored Open Source images](https://hub.docker.com/search?badges=open_source) are published and maintained by open-source projects sponsored by Docker through the program. 
Images that are part of this program have a special badge on Docker Hub making it easier for users to identify projects that Docker has verified as trusted, secure, and active open-source projects. @@ -31,8 +31,7 @@ These benefits are valid for one year and publishers can renew annually if the p DSOS organizations can upload custom images for individual repositories on Docker Hub. This lets you override the default organization-level logo on a per-repository basis. -Only a user with administrative access (owner or team member with administrator permission) -over the repository can change the repository logo. +Only a user with an owner or editor role for the organization can change the repository logo. #### Image requirements diff --git a/content/manuals/docker-hub/repos/manage/trusted-content/dvp-program.md b/content/manuals/docker-hub/repos/manage/trusted-content/dvp-program.md index 39f75d10d30..c7ff5737a30 100644 --- a/content/manuals/docker-hub/repos/manage/trusted-content/dvp-program.md +++ b/content/manuals/docker-hub/repos/manage/trusted-content/dvp-program.md @@ -14,44 +14,139 @@ aliases: - /docker-hub/publish/repository-logos/ - /docker-hub/dvp-program/ - /trusted-content/dvp-program/ +toc_max: 2 --- -[The Docker Verified Publisher Program](https://hub.docker.com/search?q=&image_filter=store) provides high-quality images from commercial publishers verified by Docker. +[The Docker Verified Publisher +Program](https://hub.docker.com/search?badges=verified_publisher) provides +high-quality images from commercial publishers verified by Docker. -These images help development teams build secure software supply chains, minimizing exposure to malicious content early in the process to save time and money later. +These images help development teams build secure software supply chains, +minimizing exposure to malicious content early in the process to save time and +money later. -Images that are part of this program have a special badge on Docker Hub making it easier for users to identify projects that Docker has verified as high-quality commercial publishers. +## Who's eligible to become a verified publisher? -![Docker-Sponsored Open Source badge](../../../images/verified-publisher-badge-iso.png) +Any independent software vendor who distributes software on Docker Hub can join +the Verified Publisher Program. Find out more by heading to the [Docker Verified +Publisher Program](https://www.docker.com/partners/programs) page. + +> [!NOTE] +> +> DVP entitlements are applied per namespace (organization). If you operate +> multiple Docker Hub namespaces, each requires a separate DVP application and +> verification process. + +## Program benefits + +The Docker Verified Publisher Program (DVP) provides several features and +benefits to Docker Hub publishers. 
The program grants the following perks based +on participation tier: + +- [Enterprise-grade infrastructure](#enterprise-grade-infrastructure): High + availability hosting with 99.9% uptime +- [Verified publisher badge](#verified-publisher-badge): Special badge + identifying high-quality commercial publishers +- [Repository logo](#repository-logo): Upload custom logos for individual + repositories +- [Insights and analytics](#insights-and-analytics): Detailed usage metrics and + community engagement data +- [Vulnerability analysis](#vulnerability-analysis): Automated security scanning + with Docker Scout +- [Priority search ranking](#priority-search-ranking): Enhanced discoverability + in Docker Hub search results +- [Co-marketing opportunities](#co-marketing-opportunities): Joint promotional + activities with Docker + +### Enterprise-grade infrastructure + +The Docker Verified Publisher Program runs on Docker Hub's enterprise-scale +infrastructure, serving millions of developers globally. Your published content +benefits from: + +- High availability and uptime: Docker's systems are designed for failover + across multiple availability zones, with load-balanced autoscaling, enabling + 99.9% uptime. +- Global delivery and fast downloads: Docker leverages a global CDN and caching + infrastructure to achieve cache hit ratios more than 99%, reducing reliance + on origin traffic and ensuring fast access for developers everywhere. +- Durability: Docker maintains a documented backup policy and performs full + daily backups of production data. + +You simply push your images to Docker Hub as usual, and Docker takes care of the +rest, serving your image to millions of developers worldwide. + +![DVP flow in Docker Hub](./images/dvp-hub-flow.svg) + +To learn more, see [Availability at +Docker](https://www.docker.com/trust/availability/). + +### Verified publisher badge -The Docker Verified Publisher Program (DVP) provides several features and benefits to Docker -Hub publishers. The program grants the following perks based on participation tier: +Images that are part of this program have a special badge on Docker Hub making +it easier for users to identify projects that Docker has verified as +high-quality commercial publishers. -- Repository logo -- Verified publisher badge -- Priority search ranking in Docker Hub -- Insights and analytics -- Vulnerability analysis -- Additional Docker Business seats -- Removal of rate limiting for developers -- Co-marketing opportunities +![Docker-Sponsored Open Source +badge](../../../images/verified-publisher-badge.png) ### Repository logo -DVP organizations can upload custom images for individual repositories on Docker Hub. -This lets you override the default organization-level logo on a per-repository basis. +DVP organizations can upload custom images for individual repositories on Docker +Hub. This lets you override the default organization-level logo on a +per-repository basis. + +To manage the repository logo, see [Manage repository logo](#manage-repository-logo). + +### Vulnerability analysis + +[Docker Scout](/scout/) provides automatic vulnerability analysis +for DVP images published to Docker Hub. +Scanning images ensures that the published content is secure, and proves to +developers that they can trust the image. + +You can enable analysis on a per-repository basis. For more about using this +feature, see [Basic vulnerability +scanning](/docker-hub/repos/manage/vulnerability-scanning/). 
+ +### Priority search ranking + +Verified publisher images receive enhanced visibility in Docker Hub search +results, making it easier for developers to discover your content. This improved +discoverability helps drive adoption of your images within the developer +community. + +### Co-marketing opportunities + +Docker collaborates with verified publishers on joint marketing initiatives, +including blog posts, case studies, webinars, and conference presentations. +These opportunities help amplify your brand visibility within the Docker +ecosystem. + +### Insights and analytics + +The insights and analytics service provides usage metrics for how +the community uses Docker images, granting insight into user behavior. + +There is both a [web interface](./insights-analytics.md) and an +[API](/reference/api/dvp/latest/) for accessing the analytics data. + +The usage metrics show the number of image pulls by tag or by digest, +geolocation, cloud provider, client, and more. -Only a user with administrative access (owner or team member with administrator permission) -over the repository can change the repository logo. +## Manage repository logo -#### Image requirements +After joining the Docker Verified Publisher Program, you can set a custom logo +for each repository in your organization. The following requirements apply: - The supported filetypes for the logo image are JPEG and PNG. - The minimum allowed image size in pixels is 120×120. - The maximum allowed image size in pixels is 1000×1000. - The maximum allowed image file size is 5MB. -#### Set the repository logo +Only a user with an owner or editor role for the organization can change the repository logo. + +### Set the repository logo 1. Sign in to [Docker Hub](https://hub.docker.com). 2. Go to the page of the repository that you want to change the logo for. @@ -61,44 +156,12 @@ current repository logo. 4. In the dialog that opens, select the PNG image that you want to upload to set it as the logo for the repository. -#### Remove the logo +### Remove the logo Select the **Clear** button ({{< inline-image src="../../../images/clear_logo_sm.png" alt="clear button" >}}) to remove a logo. -Removing the logo makes the repository default to using the organization logo, if set, or the following default logo if not. +Removing the logo makes the repository default to using the organization logo, +if set, or the following default logo if not. ![Default logo which is a 3D grey cube](../../../images/default_logo_sm.png) - -### Verified publisher badge - -Images that are part of this program have a badge on Docker Hub making it easier for developers -to identify projects that Docker has verified as high quality publishers and with content they can trust. - -![Docker, Inc. org with a verified publisher badge](../../../images/verified-publisher-badge.png) - -### Insights and analytics - -The [insights and analytics](./insights-analytics.md) service provides usage metrics for how -the community uses Docker images, granting insight into user behavior. - -The usage metrics show the number of image pulls by tag or by digest, and breakdowns by -geolocation, cloud provider, client, and more. - -You can select the time span for which you want to view analytics data. You can also export the data in either a summary or raw format. - -### Vulnerability analysis - -[Docker Scout](/scout/) provides automatic vulnerability analysis -for DVP images published to Docker Hub. 
-Scanning images ensures that the published content is secure, and proves to -developers that they can trust the image. - -You can enable analysis on a per-repository -basis. For more about using this feature, see [Basic vulnerability scanning](/docker-hub/repos/manage/vulnerability-scanning/). - -### Who's eligible to become a verified publisher? - -Any independent software vendor who distributes software on Docker Hub can join -the Verified Publisher Program. Find out more by heading to the -[Docker Verified Publisher Program](https://www.docker.com/partners/programs) page. diff --git a/content/manuals/docker-hub/repos/manage/trusted-content/images/dvp-hub-flow.svg b/content/manuals/docker-hub/repos/manage/trusted-content/images/dvp-hub-flow.svg new file mode 100644 index 00000000000..d3c73a7b93c --- /dev/null +++ b/content/manuals/docker-hub/repos/manage/trusted-content/images/dvp-hub-flow.svg @@ -0,0 +1,5 @@ + + +Write and packagecode as a DockerimageDocker HubDVPHigh-availabilityCDN & cachingDurabilityDevelopers / CIPull & use your DVPimages anywhere99.9% uptimeFast access everywhereDaily backups \ No newline at end of file diff --git a/content/manuals/docker-hub/repos/manage/trusted-content/insights-analytics.md b/content/manuals/docker-hub/repos/manage/trusted-content/insights-analytics.md index 4adf8b19bbf..88c819c375a 100644 --- a/content/manuals/docker-hub/repos/manage/trusted-content/insights-analytics.md +++ b/content/manuals/docker-hub/repos/manage/trusted-content/insights-analytics.md @@ -3,27 +3,310 @@ title: Insights and analytics description: Discover how to access usage statistics of your images on Docker Hub keywords: docker hub, hub, insights, analytics, api, verified publisher aliases: -- /docker-hub/publish/insights-analytics/ -- /docker-hub/insights-analytics/ -- /trusted-content/insights-analytics/ + - /docker-hub/publish/insights-analytics/ + - /docker-hub/insights-analytics/ + - /trusted-content/insights-analytics/ --- -Insights and analytics provides usage analytics for Docker Verified -Publisher (DVP) and Docker-Sponsored Open Source (DSOS) images on Docker Hub. This includes self-serve access to image and extension usage metrics for a desired time span. You can also display the number of image pulls by tag or by digest, and get breakdowns by geolocation, cloud provider, client, and more. +Insights and analytics provides usage analytics for [Docker Verified Publisher +(DVP)](https://www.docker.com/partners/programs/) and [Docker-Sponsored Open +Source (DSOS)](https://www.docker.com/community/open-source/application/#) +images on Docker Hub. This includes self-serve access to image and extension +usage metrics for a desired time span. You can see the number of image pulls by +tag or by digest, geolocation, cloud provider, client, and more. - -> [!TIP] +> [!NOTE] +> +> The Legacy DVP program applies to existing customers who have not yet renewed +> to DVP Core. The DVP Legacy program is deprecated and will be retired. Contact +> your Docker sales representative or +> [Docker](https://www.docker.com/partners/programs/) for more information. + +{{< tabs >}} +{{< tab name="DVP program" >}} + +All members of an organization have access to the analytics data. Members can +access analytics data in the [Docker Hub](https://hub.docker.com/) web interface. 
+ +## Available reports + +The following reports may be available for download as CSV files: + +- [Summary](#summary-report) +- [Trends](#trends-report) +- [Technographic](#technographic-report) +- [Technographic companies](#technographic-companies-report) +- [Tracked companies](#tracked-companies-report) + +The reports available for download may vary based on your organization's +subscription. Contact your Docker sales representative or +[Docker](https://www.docker.com/partners/programs/) for more information. + +## Configure DVP analytics settings + +Organization owners and editors can configure DVP analytics settings through the +Admin Console to control tracked companies and benchmark report allocations for +your verified publisher namespaces. + +1. Sign in to [Docker Home](https://app.docker.com) and select your organization. +2. Select **Admin Console** > **Verified Publisher**. +3. Configure the settings: + - **Tracked companies**: Set the number of companies to track for reporting + purposes. This setting determines how many company domains appear in your + [Tracked companies report](#tracked-companies-report). You can only set + this number up to the maximum included in your DVP subscription. + - **Benchmark report allocations**: If your organization has benchmark + reports enabled, enter the number of companies to include in the benchmark + report for each namespace listed. +4. Select **Save** to apply your changes. + +### Summary report + +The summary report provides high-level usage metrics aggregated across all your +Docker Hub content, organized by namespace and repository. This report gives you +a comprehensive overview of your image portfolio performance, helping you +understand which repositories, tags, and specific image versions are most +popular with your users. + +You can use this report to answer questions like: + +- Which of my repositories are getting the most usage? +- How do different image tags compare in terms of adoption? +- What's the ratio of actual downloads versus version checks across my + portfolio? +- Which specific image digests are being pulled most frequently? +- How has overall usage changed over time for my entire image collection? + +To access the report: + +1. Sign in to [Docker Hub](https://hub.docker.com/). +2. Select **My Hub** in the top navigation. +3. Select your organization in the left navigation. +4. Select **Analytics** > **Overview** in the left navigation. +5. Download the report by doing one of the following: + - Select **Download Weekly Summary**. + - Select the **Download Monthly Summary**. + - Expand the **Summary reports for the year** drop-down and then select + **Download report** for the desired week or month. + +The summary report is a CSV file that contains the following data points: + +| Field | Description | +| ------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `DATE_GRANULARITY` | Weekly or monthly granularity of the data. Indicates whether the data is aggregated by week or month. | +| `DATE_REFERENCE` | The start date of the week or month in YYYY-MM-DD format (e.g., `2025-09-29` for the week starting September 29, 2025). | +| `PUBLISHER_NAME` | The name of the Docker organization that owns the repository (e.g., `demonstrationorg`). 
| +| `LEVEL` | The aggregation level of the data - either `repository` (summary for entire repository), `tag` (summary for specific tag), or `digest` (summary for specific digest). | +| `REFERENCE` | The specific reference being summarized - the repository name, tag name, or digest hash depending on the level. | +| `DATA_DOWNLOADS` | The number of actual image downloads. | +| `VERSION_CHECKS` | The number of version checks performed (HEAD requests to check for updates without downloading the full image). | +| `EVENT_COUNT` | The total number of events, calculated as the sum of data downloads and version checks. | + +### Trends report + +The trends report helps you understand how adoption of your container images +evolves over time. It provides visibility into pull activity across repositories +and tags, enabling you to identify adoption patterns, version migration trends, +and usage environments (e.g., local development, CI/CD, production). + +You can use this report to answer questions like: + +- Which versions are gaining or losing traction? +- Is a new release being adopted? +- How does usage vary across cloud providers? + +To access the report: + +1. Sign in to [Docker Hub](https://hub.docker.com/). +2. Select **My Hub** in the top navigation. +3. Select your organization in the left navigation. +4. Select **Analytics** > **Trends** in the left navigation. +5. Select **DATA BY WEEK** or **DATA BY MONTH** to choose the data granularity. +6. Select **Download report** for the desired week or month. + +The trends report is a CSV file that contains the following data points: + +| Field | Description | +| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------- | +| `DATE_GRANULARITY` | Weekly or monthly granularity of the data. | +| `DATE_REFERENCE` | The start date of the week or month. | +| `PUBLISHER_NAME` | The name of the organization that owns the repository. | +| `IMAGE_REPOSITORY` | The full name of the image repository (e.g., `demonstrationorg/scout-demo`). | +| `NAMESPACE` | The Docker organization or namespace that owns the repository. | +| `IP_COUNTRY` | The country code (ISO 3166-1 alpha-2) where the pull request originated from (e.g., `US`, `CA`). | +| `CLOUD_SERVICE_PROVIDER` | The cloud service provider used for the pull request (e.g., `gcp`, `aws`, `azure`) or `no csp` for non-cloud providers. | +| `USER_AGENT` | The client application or tool used to pull the image (e.g., `docker`, `docker-scout`, `node-fetch`, `regclient`). | +| `TAG` | The specific image tag that was pulled, or `\\N` if no specific tag was used. | +| `DATA_DOWNLOADS` | The number of data downloads for the specified criteria. | +| `VERSION_CHECKS` | The number of version checks (HEAD requests) performed without downloading the full image. | +| `PULLS` | The total number of pull requests (data downloads + version checks). | +| `UNIQUE_AUTHENTICATED_USERS` | The number of unique authenticated users who performed pulls. | +| `UNIQUE_UNAUTHENTICATED_USERS` | The number of unique unauthenticated users who performed pulls. | + +### Technographic report + +The technographic report provides insights into how your Docker Verified +Publisher (DVP) images are used alongside other container images in real-world +technology stacks. This report helps you understand the technical ecosystem +where your images operate and identify co-usage patterns with other images. 
+ +You can use this report to answer questions like: + +- Which other images are commonly used together with your images? +- What percentage of your user base also uses specific complementary + technologies? +- How many companies in your ecosystem use both your image and other popular + images? +- What technology stacks are most popular among your users? + +To access the report: + +1. Sign in to [Docker Hub](https://hub.docker.com/). +2. Select **My Hub** in the top navigation. +3. Select your organization in the left navigation. +4. Select **Analytics** > **Technographic** in the left navigation. +5. Select **DATA BY WEEK** or **DATA BY MONTH** to choose the data granularity. +6. Select **Download report** for the desired week or month. + +The technographic report is a CSV file that contains the following data points: + +| Field | Description | +| ------------------ | ---------------------------------------------------------------------------------------------------------- | +| `DATE_GRANULARITY` | Weekly or monthly granularity of the data. | +| `DATE_REFERENCE` | The start date of the week or month in YYYY-MM-DD format. | +| `PUBLISHER_ID` | The unique identifier for the publisher organization. | +| `PUBLISHER_NAME` | The name of the organization that owns the DVP repository. | +| `DVPP_IMAGE` | Your Docker Verified Publisher image repository name. | +| `PAIRED_IMAGE` | The other image repository that is commonly used together with your DVP image. | +| `USERS` | The number of unique users who pulled both your DVP image and the paired image within the time period. | +| `TOTAL_PULLERS` | The total number of unique users who pulled your DVP image during the time period. | +| `PCT_USERS` | The percentage of your image's users who also use the paired image (users/total_pullers). | +| `DOMAINS` | The number of unique company domains that pulled both your DVP image and the paired image. | +| `TOTAL_DOMAINS` | The total number of unique company domains that pulled your DVP image. | +| `PCT_DOMAINS` | The percentage of company domains using your image that also use the paired image (domains/total_domains). | + +> [!NOTE] +> +> To protect user privacy and ensure statistical significance, the technographic +> report only includes image pairings that have at least 10 unique users. +> Personal, disposable, and university email domains are excluded from the +> company domain analysis. + +### Technographic companies report + +The technographic companies report provides a detailed view of which specific +companies (identified by their domains) are using your Docker Verified Publisher +(DVP) images together with other container images. This report gives you +visibility into the actual organizations adopting your technology stack +combinations, enabling targeted business development and partnership +opportunities. + +You can use this report to answer questions like: + +- Which companies are using my image alongside specific complementary + technologies? +- What technology stacks are adopted by enterprise customers in my target + market? +- Which organizations might be good candidates for partnership discussions? +- How can I identify potential customers who are already using related + technologies? + +To access the report: + +1. Sign in to [Docker Hub](https://hub.docker.com/). +2. Select **My Hub** in the top navigation. +3. Select your organization in the left navigation. +4. Select **Analytics** > **Technographic** in the left navigation. +5. 
Select **DATA BY WEEK** or **DATA BY MONTH** to choose the data granularity. +6. Select **Download report** for the desired week or month. + +The technographic companies report is a CSV file that contains the following +data points: + +| Field | Description | +| ------------------ | ---------------------------------------------------------------------------------------------- | +| `DATE_GRANULARITY` | Weekly or monthly granularity of the data. | +| `DATE_REFERENCE` | The start date of the week or month in YYYY-MM-DD format. | +| `PUBLISHER_NAME` | The name of the organization that owns the DVP repository. | +| `DOMAIN` | The company domain that pulled both your DVP image and the paired image (e.g., `example.com`). | +| `DVPP_IMAGE` | Your Docker Verified Publisher image repository name. | +| `PAIRED_IMAGE` | The other image repository that was used together with your DVP image by this company. | + +Each row represents a unique combination of a company domain, your DVP image, +and another image that were used together during the specified time period. + +> [!NOTE] +> +> To protect privacy and ensure data quality, this report excludes personal +> email domains, disposable email services, and university domains. Only +> business and organizational domains are included in the analysis. + +### Tracked companies report + +The tracked companies report provides detailed insights into how specific +companies are using your Docker Verified Publisher (DVP) images. This report +helps you understand usage patterns, deployment environments, and adoption +trends across your customer base and potential prospects. + +You can use this report to answer questions like: + +- How are specific companies using my images across different environments? +- What deployment patterns do I see across local development, CI/CD, and + production? +- Which companies are heavy users of my images? +- How does usage vary by geography and cloud providers for tracked companies? + +To access the report: + +1. Sign in to [Docker Hub](https://hub.docker.com/). +2. Select **My Hub** in the top navigation. +3. Select your organization in the left navigation. +4. Select **Analytics** > **Tracked Companies** in the left navigation. +5. Select **DATA BY WEEK** or **DATA BY MONTH** to choose the data granularity. +6. Select **Download report** for the desired week or month. + +The tracked companies report is a CSV file that contains the following data +points: + +| Field | Description | +| ---------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `DATE_GRANULARITY` | Weekly or monthly granularity of the data. | +| `DATE_REFERENCE` | The start date of the week or month in YYYY-MM-DD format. | +| `PUBLISHER_NAME` | The name of the organization that owns the DVP repository. | +| `DOMAIN` | The company domain (e.g., `docker.com`) associated with the image pulls. | +| `IP_COUNTRY` | The country code (ISO 3166-1 alpha-2) where the pull request originated from. | +| `CLOUD_SERVICE_PROVIDER` | The cloud service provider used for the pull request or `no csp` for non-cloud providers. 
| +| `USER_AGENT` | The client application or tool used to pull the image. | +| `INFERRED_USE_CASE` | The inferred deployment environment based on user agent and cloud provider analysis. Values include:
• `Local Dev`: Local development environment (e.g., Docker Desktop, direct `docker` commands)
• `CI/CD`: Continuous integration/deployment pipelines (e.g., containerd, build tools, registry mirroring)
• `Prod`: Production environments (e.g., Kubernetes, container orchestration platforms)
• `Unknown`: Unable to determine the use case from available data | +| `IMAGE_REPOSITORY` | The specific DVP image repository that was pulled. | +| `DATA_DOWNLOADS` | The number of actual image layer downloads for this combination. | +| `VERSION_CHECKS` | The number of version checks (HEAD requests) performed without downloading the full image. | +| `PULLS` | The total number of pull requests (data downloads + version checks). | +| `UNIQUE_AUTHENTICATED_USERS` | The number of unique authenticated users from this domain who performed pulls. | + +> [!NOTE] +> +> Use case inference is determined by analyzing user agent patterns and cloud +> service provider usage. Local development tools used on cloud infrastructure +> are reclassified as CI/CD, and CI/CD tools used on cloud infrastructure are +> reclassified as production to better reflect actual deployment patterns. + +{{< /tab >}} +{{< tab name="DSOS & Legacy DVP programs" >}} + +> [!IMPORTANT] > -> Head to the -[Docker Verified Publisher Program](https://www.docker.com/partners/programs/) or [Docker-Sponsored Open Source](https://www.docker.com/community/open-source/application/#) pages -to learn more about the programs. +> The Legacy DVP program applies to existing customers who have not yet renewed +> to DVP Core. The DVP Legacy program is deprecated and will be retired. Contact +> your Docker sales representative or +> [Docker](https://www.docker.com/partners/programs/) for more information. ## View the image's analytics data You can find analytics data for your repositories on the **Insights and analytics** dashboard at the following URL: -`https://hub.docker.com/orgs/{namespace}/insights/images`. The dashboard contains a -visualization of the usage data and a table where you can download +`https://hub.docker.com/orgs/{namespace}/insights/images`. The dashboard +contains a visualization of the usage data and a table where you can download the data as CSV files. To view data in the chart: @@ -34,7 +317,6 @@ To view data in the chart: ![Insights and analytics chart visualization](../../../images/chart.png) - > [!TIP] > > Hovering your cursor over the chart displays a tooltip, showing precise data @@ -42,8 +324,9 @@ To view data in the chart: ### Share analytics data -You can share the visualization with others using the **Share** icon above the chart. -This is a convenient way to share statistics with others in your organization. +You can share the visualization with others using the **Share** icon at the top +of the chart. This is a convenient way to share statistics with others in your +organization. ![Chart share icon](../../../images/chart-share-icon.png) @@ -54,14 +337,19 @@ configuration as you had set up when creating the link. ## Extension analytics data -If you have published Docker Extensions in the Extension marketplace, you can also get analytics about your extension usage, available as CSV files. -You can download extension CSV reports from the **Insights and analytics** dashboard at the following URL: -`https://hub.docker.com/orgs/{namespace}/insights/extensions`. If your Docker namespace contains extensions known in the marketplace, you will see an **Extensions** tab listing CSV files for your extension(s). +If you have published Docker Extensions in the Extension marketplace, you can +also get analytics about your extension usage, available as CSV files. 
You can +download extension CSV reports from the **Insights and analytics** dashboard at +the following URL: +`https://hub.docker.com/orgs/{namespace}/insights/extensions`. If your Docker +namespace contains extensions known in the marketplace, you will see an +**Extensions** tab listing CSV files for your extension(s). ## Exporting analytics data You can export the analytics data either from the web dashboard, or using the -[DVP Data API](/reference/api/hub/dvp.md). All members of an organization have access to the analytics data. +[DVP Data API](/reference/api/dvp/latest.md). All members of an organization +have access to the analytics data. The data is available as a downloadable CSV file, in a weekly (Monday through Sunday) or monthly format. Monthly data is available from the first day of the @@ -70,14 +358,13 @@ can analyze it manually as a spreadsheet. ### Export data -Export usage data for your organization's images using the Docker Hub website by following these steps: +Export usage data for your organization's images using the Docker Hub website by +following these steps: 1. Sign in to [Docker Hub](https://hub.docker.com/) and select **My Hub**. 2. Choose your organization and select **Analytics**. - ![Organization overview page, with the Insights and Analytics tab](../../../images/organization-tabs.png) - 3. Set the time span for which you want to export analytics data. The downloadable CSV files for summary and raw data appear on the right-hand @@ -89,7 +376,7 @@ Export usage data for your organization's images using the Docker Hub website by The HTTP API endpoints are available at: `https://hub.docker.com/api/publisher/analytics/v1`. Learn how to export data -using the API in the [DVP Data API documentation](/reference/api/hub/dvp.md). +using the API in the [DVP Data API documentation](/reference/api/dvp/latest.md). ## Data points @@ -107,7 +394,7 @@ represents an image pull. | Data point | Description | Date added | | ----------------------------- | ------------------------------------------------------------------------------------------------------------ | ----------------- | | Action | Request type, see [Action classification rules][1]. One of `pull_by_tag`, `pull_by_digest`, `version_check`. | January 1, 2022 | -| Action day | The date part of the timestamp: `YYYY-MM-DD`. | January 1, 2022 | +| Action day | The date part of the timestamp: `YYYY-MM-DD`. | January 1, 2022 | | Country | Request origin country. | January 1, 2022 | | Digest | Image digest. | January 1, 2022 | | HTTP method | HTTP method used in the request, see [registry API documentation][2] for details. | January 1, 2022 | @@ -116,8 +403,8 @@ represents an image pull. | Reference | Image digest or tag used in the request. | January 1, 2022 | | Repository | Docker [repository][4] (image name). | January 1, 2022 | | Tag (included when available) | Tag name that's only available if the request referred to a tag. | January 1, 2022 | -| Timestamp | Date and time of the request: `YYYY-MM-DD 00:00:00`. | January 1, 2022 | -| Type | The industry from which the event originates. One of `business`, `isp`, `hosting`, `education`, `null`. | January 1, 2022 | +| Timestamp | Date and time of the request: `YYYY-MM-DD 00:00:00`. | January 1, 2022 | +| Type | The industry from which the event originates. One of `business`, `isp`, `hosting`, `education`, `null`. | January 1, 2022 | | User agent tool | The application a user used to pull an image (for example, `docker` or `containerd`). 
| January 1, 2022 | | User agent version | The version of the application used to pull an image. | January 1, 2022 | | Domain | Request origin domain, see [Privacy](#privacy). | October 11, 2022 | @@ -125,7 +412,7 @@ represents an image pull. [1]: #image-pulls-action-classification-rules [2]: /registry/spec/api/ -[3]: /admin/organization/orgs/ +[3]: /admin/organization/setup/orgs/ [4]: /docker-hub/repos/ ### Image pulls summary data @@ -183,32 +470,34 @@ pulls. To provide feedback or ask questions about these rules, There are two levels of extension summary data available: -- Core summary, with basic extension usage information: number of extension installs, uninstalls, and total install all times +- Core summary, with basic extension usage information: number of extension + installs, uninstalls, and total install all times The core-summary-data file contains the following data points for the selected time span: -| Data point | Description | Date added | -| ----------------- | ------------------------------------------------------- | ----------------- | -| Installs | Number of installs for the extension | Feb 1, 2024 | -| TotalInstalls | Number of installs for the extension all times | Feb 1, 2024 | -| Uninstalls | Number of uninstalls for the extension | Feb 1, 2024 | -| TotalUninstalls | Number of uninstalls for the extension all times | Feb 1, 2024 | -| Updates | Number of updates for the extension | Feb 1, 2024 | +| Data point | Description | Date added | +| --------------- | ------------------------------------------------ | ----------- | +| Installs | Number of installs for the extension | Feb 1, 2024 | +| TotalInstalls | Number of installs for the extension all times | Feb 1, 2024 | +| Uninstalls | Number of uninstalls for the extension | Feb 1, 2024 | +| TotalUninstalls | Number of uninstalls for the extension all times | Feb 1, 2024 | +| Updates | Number of updates for the extension | Feb 1, 2024 | -- Premium summary, with advanced extension usage information: installs, uninstalls by unique users, extension opening by unique users. +- Premium summary, with advanced extension usage information: installs, + uninstalls by unique users, extension opening by unique users. 
The core-summary-data file contains the following data points for the selected time span: -| Data point | Description | Date added | -| ----------------- | ------------------------------------------------------- | ----------------- | -| Installs | Number of installs for the extension | Feb 1, 2024 | -| UniqueInstalls | Number of unique users installing the extension | Feb 1, 2024 | -| Uninstalls | Number of uninstalls for the extension | Feb 1, 2024 | -| UniqueUninstalls | Number of unique users uninstalling the extension | Feb 1, 2024 | -| Usage | Number of openings of the extension tab | Feb 1, 2024 | -| UniqueUsers | Number of unique users openings the extension tab | Feb 1, 2024 | +| Data point | Description | Date added | +| ---------------- | ------------------------------------------------- | ----------- | +| Installs | Number of installs for the extension | Feb 1, 2024 | +| UniqueInstalls | Number of unique users installing the extension | Feb 1, 2024 | +| Uninstalls | Number of uninstalls for the extension | Feb 1, 2024 | +| UniqueUninstalls | Number of unique users uninstalling the extension | Feb 1, 2024 | +| Usage | Number of openings of the extension tab | Feb 1, 2024 | +| UniqueUsers | Number of unique users openings the extension tab | Feb 1, 2024 | ## Changes in data over time @@ -222,12 +511,14 @@ from the date of when the field was first introduced, and going forward. Refer to the tables in the [Data points](#data-points) section to see from which date a given data point is available. +{{< /tab >}} +{{< /tabs >}} + ## Privacy This section contains information about privacy-protecting measures that ensures consumers of content on Docker Hub remain completely anonymous. - > [!IMPORTANT] > > Docker never shares any Personally Identifiable Information (PII) as part of @@ -237,10 +528,10 @@ The image pulls summary dataset includes unique IP address count. This data poin includes the number of distinct unique IP addresses that request an image. Individual IP addresses are never shared. -The image pulls raw dataset includes user IP domains as a data point. This is the domain name -associated with the IP address used to pull an image. If the IP type is -`business`, the domain represents the company or organization associated with -that IP address (for example, `docker.com`). For any other IP type that's not -`business`, the domain represents the internet service provider or hosting +The image pulls raw dataset includes user IP domains as a data point. This is +the domain name associated with the IP address used to pull an image. If the IP +type is `business`, the domain represents the company or organization associated +with that IP address (for example, `docker.com`). For any other IP type that's +not `business`, the domain represents the internet service provider or hosting provider used to make the request. On average, only about 30% of all pulls classify as the `business` IP type (this varies between publishers and images). diff --git a/content/manuals/docker-hub/repos/manage/trusted-content/official-images.md b/content/manuals/docker-hub/repos/manage/trusted-content/official-images.md index 206ee0eaa55..07f3e330628 100644 --- a/content/manuals/docker-hub/repos/manage/trusted-content/official-images.md +++ b/content/manuals/docker-hub/repos/manage/trusted-content/official-images.md @@ -10,6 +10,18 @@ aliases: - /docker-hub/official_images/ --- +> [!NOTE] +> +> Docker is retiring Docker Content Trust (DCT) for Docker Official Images +> (DOI). 
You should start planning to transition to a different image signing +> and verification solution (like [Sigstore](https://www.sigstore.dev/) or +> [Notation](https://github.com/notaryproject/notation#readme)). Docker will +> publish migration guides soon to help you in that effort. Timelines for the +> complete deprecation of DCT are being finalized and will be published soon. +> +> For more details, see +> https://www.docker.com/blog/retiring-docker-content-trust/. + Docker, Inc. sponsors a dedicated team that's responsible for reviewing and publishing all content in Docker Official Images. This team works in collaboration with upstream software maintainers, security experts, and the diff --git a/content/manuals/docker-hub/repos/manage/webhooks.md b/content/manuals/docker-hub/repos/manage/webhooks.md index 67928127f7d..0090bf5381e 100644 --- a/content/manuals/docker-hub/repos/manage/webhooks.md +++ b/content/manuals/docker-hub/repos/manage/webhooks.md @@ -14,7 +14,7 @@ You can use webhooks to cause an action in another service in response to a push To create a webhook: 1. In your chosen repository, select the **Webhooks** tab. 2. Provide a name for the webhook. -3. Provide a destination webhook URL. This is where webhook POST requests are delivered. +3. Provide a destination webhook URL. This is where webhook POST requests are delivered. The URL must be 255 characters or fewer. 4. Select **Create**. ## View webhook delivery history @@ -57,3 +57,7 @@ Webhook payloads have the following JSON format: } } ``` + +> [!NOTE] +> +> The `callback_url` field is a legacy field and is no longer supported. \ No newline at end of file diff --git a/content/manuals/docker-hub/repos/settings.md b/content/manuals/docker-hub/repos/settings.md deleted file mode 100644 index 3b0f917d1eb..00000000000 --- a/content/manuals/docker-hub/repos/settings.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -description: Learn about personal repository settings in Docker Hub -keywords: Docker Hub, Hub, repositories, settings -title: Personal settings for repositories -linkTitle: Personal settings -toc_max: 3 -weight: 50 ---- - -For your account, you can set personal settings for repositories, including -default repository privacy and autobuild notifications. - -## Default repository privacy - -When creating a new repository in Docker Hub, you are able to specify the -repository visibility. You can also change the visibility at any time in Docker Hub. - -The default setting is useful if you use the `docker push` command to push to a -repository that doesn't exist yet. In this case, Docker Hub automatically -creates the repository with your default repository privacy. - -### Configure default repository privacy - -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **My Hub** > **Settings** > **Default privacy**. -3. Select the **Default privacy** for any new repository created. - - - **Public**: All new repositories appear in Docker Hub search results and can be - pulled by everyone. - - **Private**: All new repositories don't appear in Docker Hub search results - and are only accessible to you and collaborators. In addition, if the - repository is created in an organization's namespace, then the repository - is accessible to those with applicable roles or permissions. - -4. Select **Save**. - -## Autobuild notifications - -You can send notifications to your email for all your repositories using -autobuilds. - -### Configure autobuild notifications - -1. Sign in to [Docker Hub](https://hub.docker.com). -2. 
Select **My Hub** > **Repositories** > **Settings** > **Notifications**. -3. Select the notifications to receive by email. - - - **Off**: No notifications. - - **Only failures**: Only notifications about failed builds. - - **Everything**: Notifications for successful and failed builds. - -4. Select **Save**. diff --git a/content/manuals/docker-hub/service-accounts.md b/content/manuals/docker-hub/service-accounts.md deleted file mode 100644 index c8694214a1f..00000000000 --- a/content/manuals/docker-hub/service-accounts.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -description: Docker Service accounts -keywords: Docker, service, accounts, Docker Hub -title: Service accounts -weight: 50 ---- - -{{% include "new-plans.md" %}} - -> [!IMPORTANT] -> -> As of December 10, 2024, Enhanced Service Account add-ons are no longer -> available. Existing Service Account agreements will be honored until their -> current term expires, but new purchases or renewals of Enhanced Service -> Account add-ons are no longer available and customers must renew under a new -> subscription plan. -> -> Docker recommends transitioning to [Organization Access Tokens -> (OATs)](../security/for-admins/access-tokens.md), which can provide similar -> functionality. - -A service account is a Docker ID used for automated management of container images or containerized applications. Service accounts are typically used in automated workflows, and don't share Docker IDs with the members in the organization. Common use cases for service accounts include mirroring content on Docker Hub, or tying in image pulls from your CI/CD process. - -## Enhanced Service Account add-on tiers - -Refer to the following table for details on the Enhanced Service Account add-ons: - -| Tier | Pull Rates Per Day\* | -| ------ | ------ | -| 1 | 5,000-10,000 | -| 2 | 10,000-25,000 | -| 3 | 25,000-50,000 | -| 4 | 50,000-100,000 | -| 5 | 100,000+ | - -*The service account may exceed Pulls by up to 25% for up to 20 days during the year without incurring additional fees. Reports on consumption are available upon request. \ No newline at end of file diff --git a/content/manuals/docker-hub/settings.md b/content/manuals/docker-hub/settings.md new file mode 100644 index 00000000000..9e0fccb3df7 --- /dev/null +++ b/content/manuals/docker-hub/settings.md @@ -0,0 +1,98 @@ +--- +description: Learn about settings in Docker Hub +keywords: Docker Hub, Hub, repositories, settings +title: Settings +weight: 25 +aliases: + - /docker-hub/repos/settings/ +--- + +You can configure the following settings in Docker Hub: + +- [Default privacy](#default-privacy): Settings for all repositories within each + namespace +- [Notifications](#notifications): Personal settings for autobuild notifications + +## Default privacy + +You can configure the following default privacy settings for all repositories in +a namespace: + +- [Disable creation of public repos](#disable-creation-of-public-repos): Prevent + organization users from creating public repositories (organization namespaces + only) +- [Configure default repository privacy](#configure-default-repository-privacy): + Set the default repository privacy for new repositories + + +### Disable creation of public repos + +{{< summary-bar feature_name="Disable public repositories" >}} + +Organization owners and editors can prevent creating public repositories within +organization namespaces. You cannot configure this setting for personal account +namespaces. + +> [!NOTE] +> +> Enabling this feature does not affect existing public repositories. 
Any public +> repositories that already exist will remain public. To make them private, you +> must change their visibility in the individual repository settings. + +To configure the disable public repositories setting for an organization +namespace: + +1. Sign in to [Docker Hub](https://hub.docker.com). +2. Select **My Hub**. +3. Select your organization from the top-left account drop-down. +4. Select **Settings** > **Default privacy**. +5. Toggle **Disable public repositories** to your desired setting. +6. Select **Save**. + +### Configure default repository privacy + +Use the default repository privacy setting to automatically set privacy for +repositories created via `docker push` commands when the repository doesn't +exist yet. In this case, Docker Hub automatically creates the repository with +the default repository privacy for that namespace. + +> [!NOTE] +> +> You cannot configure the default repository privacy setting when **Disable +> public repositories** is enabled. + +To configure the default repository privacy for a namespace: + +1. Sign in to [Docker Hub](https://hub.docker.com). +2. Select **My Hub**. +3. Select your organization or account from the top-left account drop-down. +4. Select **Settings** > **Default privacy**. +5. In **Default repository privacy**, select the desired default privacy setting: + + - **Public**: All new repositories appear in Docker Hub search results and can be + pulled by everyone. + - **Private**: All new repositories don't appear in Docker Hub search results + and are only accessible to you and collaborators. In addition, if the + repository is created in an organization's namespace, then the repository + is accessible to those with applicable roles or permissions. + +6. Select **Save**. + +## Notifications + +You can send notifications to your email for all your repositories using +autobuilds. + +### Configure autobuild notifications + +1. Sign in to [Docker Hub](https://hub.docker.com). +2. Select **My Hub**. +3. Select your personal account from the top-left account drop-down. +4. Select **Settings** > **Notifications**. +5. Select the notifications to receive by email: + + - **Off**: No notifications. + - **Only failures**: Only notifications about failed builds. + - **Everything**: Notifications for successful and failed builds. + +6. Select **Save**. diff --git a/content/manuals/docker-hub/usage/pulls.md b/content/manuals/docker-hub/usage/pulls.md index 2835edad9ea..c04e64c82ec 100644 --- a/content/manuals/docker-hub/usage/pulls.md +++ b/content/manuals/docker-hub/usage/pulls.md @@ -17,7 +17,7 @@ The following pull usage and limits apply based on your subscription, subject to fair use: | User type | Pull rate limit per 6 hours | -|--------------------------|-----------------------------------------| +| ------------------------ | --------------------------------------- | | Business (authenticated) | Unlimited | | Team (authenticated) | Unlimited | | Pro (authenticated) | Unlimited | @@ -28,20 +28,20 @@ fair use: A pull is defined as the following: - - A Docker pull includes both a version check and any download that - occurs as a result of the pull. Depending on the client, a `docker pull` can - verify the existence of an image or tag without downloading it by performing - a version check. - - Version checks do not count towards usage pricing. - - A pull for a normal image makes one pull for a [single - manifest](https://github.com/opencontainers/image-spec/blob/main/manifest.md). 
- - A pull for a multi-arch image will count as one pull for each - different architecture. +- A Docker pull includes both a version check and any download that + occurs as a result of the pull. Depending on the client, a `docker pull` can + verify the existence of an image or tag without downloading it by performing + a version check. +- Version checks do not count towards usage pricing. +- A pull for a normal image makes one pull for a [single + manifest](https://github.com/opencontainers/image-spec/blob/main/manifest.md). +- A pull for a multi-arch image will count as one pull for each + different architecture. ## Pull attribution Pulls from authenticated users can be attributed to either a personal or an -[organization namespace](/manuals/admin/faqs/general-faqs.md#whats-an-organization-name-or-namespace). +[organization namespace](/manuals/accounts/general-faqs.md#whats-an-organization-name-or-namespace). Attribution is based on the following: @@ -51,19 +51,18 @@ Attribution is based on the following: determined based on domain affiliation and organization membership. - Verified domain ownership: When pulling an image from an account linked to a verified domain, the attribution is set to be the owner of that - [domain](/manuals/security/faqs/single-sign-on/domain-faqs.md). + [domain](/manuals/enterprise/security/single-sign-on/faqs/domain-faqs.md). - Single organization membership: - - If the owner of the verified domain is a company and the user is part of - only one organization within that - [company](../../admin/faqs/company-faqs.md#what-features-are-supported-at-the-company-level), - the pull is attributed to that specific organization. - - If the user is part of only one organization, the pull is attributed to - that specific organization. + - If the owner of the verified domain is a company and the user is part of + only one organization within that + [company](../../admin/company/company-faqs.md), + the pull is attributed to that specific organization. + - If the user is part of only one organization, the pull is attributed to + that specific organization. - Multiple organization memberships: If the user is part of multiple organizations under the company, the pull is attributed to the user's personal namespace. - ### Authentication To ensure correct attribution of your pulls, you must authenticate with Docker @@ -82,15 +81,15 @@ the on-screen instructions to complete the sign-in process. If you're using a standalone version of Docker Engine, run the `docker login` command from a terminal to authenticate with Docker Hub. For information on how -to use the command, see [docker login](/reference/cli/docker/login.md). +to use the command, see [docker login](/reference/cli/docker/login/). #### Docker Swarm If you're running Docker Swarm, you must use the `--with-registry-auth` flag to authenticate with Docker Hub. For more information, see [Create a -service](/reference/cli/docker/service/create.md#with-registry-auth). If you +service](/reference/cli/docker/service/create/#with-registry-auth). If you are using a Docker Compose file to deploy an application stack, see [docker -stack deploy](/reference/cli/docker/stack/deploy.md). +stack deploy](/reference/cli/docker/stack/deploy/). 
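
A minimal sketch of the Swarm flow, assuming a hypothetical private image `myorg/private-image:latest` and service name `my-service`: sign in on a manager node first, then pass `--with-registry-auth` so your registry credentials are forwarded to the nodes that pull the image.

```console
$ docker login
$ docker service create \
    --with-registry-auth \
    --name my-service \
    myorg/private-image:latest
```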
#### GitHub Actions @@ -120,9 +119,9 @@ If you're using any third-party platforms, follow your provider’s instructions - [Artifactory](https://www.jfrog.com/confluence/display/JFROG/Advanced+Settings#AdvancedSettings-RemoteCredentials) - [AWS CodeBuild](https://aws.amazon.com/blogs/devops/how-to-use-docker-images-from-a-private-registry-in-aws-codebuild-for-your-build-environment/) - [AWS ECS/Fargate](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/private-auth.html) -- [Azure Pipelines](https://docs.microsoft.com/en-us/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml#sep-docreg) +- [Azure Pipelines](https://learn.microsoft.com/en-us/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) - [Chipper CI](https://docs.chipperci.com/builds/docker/#rate-limit-auth) -- [CircleCI](https://circleci.com/docs/2.0/private-images/) +- [CircleCI](https://circleci.com/docs/guides/execution-managed/private-images/) - [Codefresh](https://codefresh.io/docs/docs/docker-registries/external-docker-registries/docker-hub/) - [Drone.io](https://docs.drone.io/pipeline/docker/syntax/images/#pulling-private-images) - [GitLab](https://docs.gitlab.com/ee/user/packages/container_registry/#authenticate-with-the-container-registry) @@ -137,7 +136,7 @@ On that page, you can also send a report to your email that contains a comma separated file with the following detailed information. | CSV column | Definition | Usage guidance | -|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `datehour` | The date and hour (`yyyy/mm/dd/hh`) of the pull that resulted in the data transfer. | This helps in identifying peak usage times and patterns. | | `user_name` | The Docker ID of the user that pulled the image | This lets organization owners track data consumption per user and manage resources effectively. | | `repository` | The name of the repository of the image that was pulled. | This lets you identify which repositories are most frequently accessed and consume most of the data transfer. | @@ -147,7 +146,7 @@ separated file with the following detailed information. | `tag` | The tag for the image. The tag is only available if the pull included a tag. | This helps in identifying the image. Tags are often used to identify specific versions or variants of an image. | | `digest` | The unique image digest for the image. | This helps in identifying the image. | | `version_checks` | The number of version checks accumulated for the date and hour of each image repository. Depending on the client, a pull can do a version check to verify the existence of an image or tag without downloading it. 
| This helps identify the frequency of version checks, which you can use to analyze usage trends and potential unexpected behaviors. | -| `pulls` | The number of pulls accumulated for the date and hour of each image repository. | This helps identify the frequency of repository pulls, which you can use to analyze usage trends and potential unexpected behaviors. | +| `pulls` | The number of pulls accumulated for the date and hour of each image repository. | This helps identify the frequency of repository pulls, which you can use to analyze usage trends and potential unexpected behaviors. | ## View pull rate and limit @@ -169,28 +168,26 @@ To view your current pull rate and limit: > [!NOTE] > -> To check your limits, you need `curl`, `grep`, and `jq` installed. +> To check your limits, you need `curl` and `jq` installed. 1. Get a token. - - To get a token anonymously, if you are pulling anonymously: - ```console - $ TOKEN=$(curl "https://auth.docker.io/token?service=registry.docker.io&scope=repository:ratelimitpreview/test:pull" | jq -r .token) - ``` + ```console + $ TOKEN=$(curl "https://auth.docker.io/token?service=registry.docker.io&scope=repository:ratelimitpreview/test:pull" | jq -r .token) + ``` - To get a token with a user account, if you are authenticated, insert your username and password in the following command: - ```console - $ TOKEN=$(curl --user 'username:password' "https://auth.docker.io/token?service=registry.docker.io&scope=repository:ratelimitpreview/test:pull" | jq -r .token) - ``` + ```console + $ TOKEN=$(curl --user 'username:password' "https://auth.docker.io/token?service=registry.docker.io&scope=repository:ratelimitpreview/test:pull" | jq -r .token) + ``` 2. Get the headers that contain your limits. These headers are returned on both GET and HEAD requests. Using GET emulates a real pull and counts towards the limit. Using HEAD won't. - ```console $ curl --head -H "Authorization: Bearer $TOKEN" https://registry-1.docker.io/v2/ratelimitpreview/test/manifests/latest ``` @@ -209,6 +206,5 @@ To view your current pull rate and limit: If you don't see any `ratelimit` header, it could be because the image or your IP is unlimited in partnership with a publisher, provider, or an open source organization. It could also mean that the user you are pulling as is part of a - paid Docker plan. Pulling that image won't count toward pull rate limits if you + paid Docker subscription. Pulling that image won't count toward pull rate limits if you don't see these headers. - diff --git a/content/manuals/engine/_index.md b/content/manuals/engine/_index.md index febf3717d81..846416706f8 100644 --- a/content/manuals/engine/_index.md +++ b/content/manuals/engine/_index.md @@ -5,7 +5,7 @@ description: Find a comprehensive overview of Docker Engine, including how to in keywords: Engine params: sidebar: - group: Open source + group: Application development grid: - title: Install Docker Engine description: Learn how to install the open source Docker Engine for your distribution. @@ -74,8 +74,7 @@ For more details, see ## Licensing -The Docker Engine is licensed under the Apache License, Version 2.0. See -[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full license -text. - -However, for commercial use of Docker Engine obtained via Docker Desktop within larger enterprises (exceeding 250 employees OR with annual revenue surpassing $10 million USD), a [paid subscription](https://www.docker.com/pricing/) is required. 
+Commercial use of Docker Engine obtained via Docker Desktop +within larger enterprises (exceeding 250 employees OR with annual revenue surpassing +$10 million USD) requires a [paid subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsEngine). +Docker Engine itself is licensed under the Apache License, Version 2.0. See +[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full license text. diff --git a/content/manuals/engine/cli/filter.md b/content/manuals/engine/cli/filter.md index e51fb633470..9943a400956 100644 --- a/content/manuals/engine/cli/filter.md +++ b/content/manuals/engine/cli/filter.md @@ -30,15 +30,15 @@ output of the `docker images` command to only print `alpine` images. ```console $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -ubuntu 20.04 33a5cc25d22c 36 minutes ago 101MB -ubuntu 18.04 152dc042452c 36 minutes ago 88.1MB -alpine 3.16 a8cbb8c69ee7 40 minutes ago 8.67MB +ubuntu 24.04 33a5cc25d22c 36 minutes ago 101MB +ubuntu 22.04 152dc042452c 36 minutes ago 88.1MB +alpine 3.21 a8cbb8c69ee7 40 minutes ago 8.67MB alpine latest 7144f7bab3d4 40 minutes ago 11.7MB busybox uclibc 3e516f71d880 48 minutes ago 2.4MB busybox glibc 7338d0c72c65 48 minutes ago 6.09MB $ docker images --filter reference=alpine REPOSITORY TAG IMAGE ID CREATED SIZE -alpine 3.16 a8cbb8c69ee7 40 minutes ago 8.67MB +alpine 3.21 a8cbb8c69ee7 40 minutes ago 8.67MB alpine latest 7144f7bab3d4 40 minutes ago 11.7MB ``` @@ -58,9 +58,9 @@ following example shows how to print all images that match `alpine:latest` or ```console $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -ubuntu 20.04 33a5cc25d22c 2 hours ago 101MB -ubuntu 18.04 152dc042452c 2 hours ago 88.1MB -alpine 3.16 a8cbb8c69ee7 2 hours ago 8.67MB +ubuntu 24.04 33a5cc25d22c 2 hours ago 101MB +ubuntu 22.04 152dc042452c 2 hours ago 88.1MB +alpine 3.21 a8cbb8c69ee7 2 hours ago 8.67MB alpine latest 7144f7bab3d4 2 hours ago 11.7MB busybox uclibc 3e516f71d880 2 hours ago 2.4MB busybox glibc 7338d0c72c65 2 hours ago 6.09MB @@ -95,21 +95,21 @@ $ docker container prune --filter "label!=foo" --filter "label!=bar" For more information about filtering commands, refer to the CLI reference description for commands that support the `--filter` flag: -- [`docker config ls`](/reference/cli/docker/config/ls.md) -- [`docker container prune`](/reference/cli/docker/container/prune.md) -- [`docker image prune`](/reference/cli/docker/image/prune.md) -- [`docker image ls`](/reference/cli/docker/image/ls.md) 
+- [`docker network ls`](/reference/cli/docker/network/ls/) +- [`docker network prune`](/reference/cli/docker/network/prune/) +- [`docker node ls`](/reference/cli/docker/node/ls/) +- [`docker node ps`](/reference/cli/docker/node/ps/) +- [`docker plugin ls`](/reference/cli/docker/plugin/ls/) +- [`docker container ls`](/reference/cli/docker/container/ls/) +- [`docker search`](/reference/cli/docker/search/) +- [`docker secret ls`](/reference/cli/docker/secret/ls/) +- [`docker service ls`](/reference/cli/docker/service/ls/) +- [`docker service ps`](/reference/cli/docker/service/ps/) +- [`docker stack ps`](/reference/cli/docker/stack/ps/) +- [`docker system prune`](/reference/cli/docker/system/prune/) +- [`docker volume ls`](/reference/cli/docker/volume/ls/) +- [`docker volume prune`](/reference/cli/docker/volume/prune/) diff --git a/content/manuals/engine/cli/formatting.md b/content/manuals/engine/cli/formatting.md index ad20c8c1cd9..4e8c24c8153 100644 --- a/content/manuals/engine/cli/formatting.md +++ b/content/manuals/engine/cli/formatting.md @@ -18,7 +18,7 @@ include examples of customizing the output format. > [!NOTE] > -> When using the `--format` flag, you need observe your shell environment. +> When using the `--format` flag, you need to observe your shell environment. > In a POSIX shell, you can run the following with a single quote: > > ```console diff --git a/content/manuals/engine/cli/otel.md b/content/manuals/engine/cli/otel.md index 8ec3bbf88b7..0a75441211d 100644 --- a/content/manuals/engine/cli/otel.md +++ b/content/manuals/engine/cli/otel.md @@ -155,7 +155,7 @@ With these files in place: ## Available metrics -Docker CLI currently exports a single metric, `command.time`, which measures +Docker CLI exports a single metric, `command.time`, which measures the execution duration of a command in milliseconds. This metric has the following attributes: diff --git a/content/manuals/engine/containers/gpu.md b/content/manuals/engine/containers/gpu.md new file mode 100644 index 00000000000..09d73d4d568 --- /dev/null +++ b/content/manuals/engine/containers/gpu.md @@ -0,0 +1,112 @@ +--- +title: GPU access +weight: 40 +description: How to access NVIDIA GPUs from a container +keywords: docker, GPU, NVIDIA, cuda, nvidia-smi, device, container toolkit +--- + +## Access an NVIDIA GPU + +### Prerequisites + +Visit the official [NVIDIA drivers page](https://www.nvidia.com/Download/index.aspx) +to download and install the proper drivers. Reboot your system once you have +done so. + +Verify that your GPU is running and accessible. + +### Install NVIDIA Container Toolkit + +Follow the official NVIDIA Container Toolkit [installation instructions](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html). + +### Expose GPUs for use + +Include the `--gpus` flag when you start a container to access GPU resources. + +To expose all available GPUs: + +```console +$ docker run -it --rm --gpus all ubuntu nvidia-smi +``` + +The output looks like the following: + +```text ++---------------------------------------------------------------------------------------+ +| NVIDIA-SMI 535.288.01 Driver Version: 535.288.01 CUDA Version: 12.2 | +|-----------------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+======================+======================| +| 0 NVIDIA L4 Off | 00000000:31:00.0 Off | 0 | +| N/A 40C P0 27W / 72W | 0MiB / 23034MiB | 4% Default | +| | | N/A | ++-----------------------------------------+----------------------+----------------------+ + ++---------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=======================================================================================| +| No running processes found | ++---------------------------------------------------------------------------------------+ +``` + +The leftmost column in the GPU table shows the index of each GPU (`0` for the +NVIDIA L4 in the previous example). Use these index numbers to target specific GPUs +with the `device` option. + +To expose a single GPU by index: + +```console +$ docker run -it --rm --gpus device=0 ubuntu nvidia-smi +``` + +To expose a GPU by its UUID, first list UUIDs with `nvidia-smi -L`: + +```console +$ nvidia-smi -L +GPU 0: NVIDIA L4 (UUID: GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a) +``` + +Then pass the UUID to `--gpus`: + +```console +$ docker run -it --rm --gpus device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a ubuntu nvidia-smi +``` + +On systems with multiple GPUs, you can expose several by index. The `device` +value must be quoted because it contains a comma: + +```console +$ docker run -it --rm --gpus '"device=0,2"' ubuntu nvidia-smi +``` + +This exposes the GPUs at index `0` and `2` — the first and third GPUs listed in +`nvidia-smi` output. + +> [!NOTE] +> +> NVIDIA GPUs can only be accessed by systems running a single engine. + +### Set NVIDIA capabilities + +You can set capabilities manually. For example, on Ubuntu you can run the +following: + +```console +$ docker run --gpus 'all,capabilities=utility' --rm ubuntu nvidia-smi +``` + +This enables the `utility` driver capability which adds the `nvidia-smi` tool to +the container. + +Capabilities and other configurations can be set in images via environment +variables. For valid variables, see the +[nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/docker-specialized.html) +documentation. These variables can be set in a Dockerfile. + +You can also use CUDA images, which set these variables automatically. See the +official [CUDA images](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda) +NGC catalog page. diff --git a/content/manuals/engine/containers/resource_constraints.md b/content/manuals/engine/containers/resource_constraints.md index 5f9efc616eb..76df2f43cda 100644 --- a/content/manuals/engine/containers/resource_constraints.md +++ b/content/manuals/engine/containers/resource_constraints.md @@ -1,8 +1,8 @@ --- title: Resource constraints weight: 30 -description: Specify the runtime options for a container -keywords: docker, daemon, configuration, runtime +description: Limit container memory and CPU usage with runtime configuration flags +keywords: resource constraints, memory limits, CPU limits, cgroups, OOM, swap, docker run, memory swap aliases: - /engine/admin/resource_constraints/ - /config/containers/resource_constraints/ @@ -16,7 +16,7 @@ on when you should set such limits and the possible implications of setting them Many of these features require your kernel to support Linux capabilities. To check for support, you can use the -[`docker info`](/reference/cli/docker/system/info.md) command. 
If a capability +[`docker info`](/reference/cli/docker/system/info/) command. If a capability is disabled in your kernel, you may see a warning at the end of the output like the following: @@ -86,7 +86,6 @@ Most of these options take a positive integer, followed by a suffix of `b`, `k`, | `--memory-swap`\* | The amount of memory this container is allowed to swap to disk. See [`--memory-swap` details](#--memory-swap-details). | | `--memory-swappiness` | By default, the host kernel can swap out a percentage of anonymous pages used by a container. You can set `--memory-swappiness` to a value between 0 and 100, to tune this percentage. See [`--memory-swappiness` details](#--memory-swappiness-details). | | `--memory-reservation` | Allows you to specify a soft limit smaller than `--memory` which is activated when Docker detects contention or low memory on the host machine. If you use `--memory-reservation`, it must be set lower than `--memory` for it to take precedence. Because it is a soft limit, it doesn't guarantee that the container doesn't exceed the limit. | -| `--kernel-memory` | The maximum amount of kernel memory the container can use. The minimum allowed value is `6m`. Because kernel memory can't be swapped out, a container which is starved of kernel memory may block host machine resources, which can have side effects on the host machine and on other containers. See [`--kernel-memory` details](#--kernel-memory-details). | | `--oom-kill-disable` | By default, if an out-of-memory (OOM) error occurs, the kernel kills processes in a container. To change this behavior, use the `--oom-kill-disable` option. Only disable the OOM killer on containers where you have also set the `-m/--memory` option. If the `-m` flag isn't set, the host can run out of memory and the kernel may need to kill the host system's processes to free memory. | For more information about cgroups and memory in general, see the documentation @@ -139,34 +138,6 @@ of physical memory that can be used. - By default, if you don't set `--memory-swappiness`, the value is inherited from the host machine. -### `--kernel-memory` details - -Kernel memory limits are expressed in terms of the overall memory allocated to -a container. Consider the following scenarios: - -- **Unlimited memory, unlimited kernel memory**: This is the default - behavior. -- **Unlimited memory, limited kernel memory**: This is appropriate when the - amount of memory needed by all cgroups is greater than the amount of - memory that actually exists on the host machine. You can configure the - kernel memory to never go over what's available on the host machine, - and containers which need more memory need to wait for it. -- **Limited memory, unlimited kernel memory**: The overall memory is - limited, but the kernel memory isn't. -- **Limited memory, limited kernel memory**: Limiting both user and kernel - memory can be useful for debugging memory-related problems. If a container - is using an unexpected amount of either type of memory, it runs out - of memory without affecting other containers or the host machine. Within - this setting, if the kernel memory limit is lower than the user memory - limit, running out of kernel memory causes the container to experience - an OOM error. If the kernel memory limit is higher than the user memory - limit, the kernel limit doesn't cause the container to experience an OOM. 
- -When you enable kernel memory limits, the host machine tracks the "high water mark" -statistics on a per-process basis, so you can track which processes (in this -case, containers) are using excess memory. This can be seen per process by -viewing `/proc//status` on the host machine. - ## CPU By default, each container's access to the host machine's CPU cycles is unlimited. @@ -265,84 +236,5 @@ If the kernel or Docker daemon isn't configured correctly, an error occurs. ## GPU -### Access an NVIDIA GPU - -#### Prerequisites - -Visit the official [NVIDIA drivers page](https://www.nvidia.com/Download/index.aspx) -to download and install the proper drivers. Reboot your system once you have -done so. - -Verify that your GPU is running and accessible. - -#### Install nvidia-container-toolkit - -Follow the official NVIDIA Container Toolkit [installation instructions](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html). - -#### Expose GPUs for use - -Include the `--gpus` flag when you start a container to access GPU resources. -Specify how many GPUs to use. For example: - -```console -$ docker run -it --rm --gpus all ubuntu nvidia-smi -``` - -Exposes all available GPUs and returns a result akin to the following: - -```bash -+-------------------------------------------------------------------------------+ -| NVIDIA-SMI 384.130 Driver Version: 384.130 | -|-------------------------------+----------------------+------------------------+ -| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | -| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | -|===============================+======================+========================| -| 0 GRID K520 Off | 00000000:00:03.0 Off | N/A | -| N/A 36C P0 39W / 125W | 0MiB / 4036MiB | 0% Default | -+-------------------------------+----------------------+------------------------+ -+-------------------------------------------------------------------------------+ -| Processes: GPU Memory | -| GPU PID Type Process name Usage | -|===============================================================================| -| No running processes found | -+-------------------------------------------------------------------------------+ -``` - -Use the `device` option to specify GPUs. For example: - -```console -$ docker run -it --rm --gpus device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a ubuntu nvidia-smi -``` - -Exposes that specific GPU. - -```console -$ docker run -it --rm --gpus '"device=0,2"' ubuntu nvidia-smi -``` - -Exposes the first and third GPUs. - -> [!NOTE] -> -> NVIDIA GPUs can only be accessed by systems running a single engine. - -#### Set NVIDIA capabilities - -You can set capabilities manually. For example, on Ubuntu you can run the -following: - -```console -$ docker run --gpus 'all,capabilities=utility' --rm ubuntu nvidia-smi -``` - -This enables the `utility` driver capability which adds the `nvidia-smi` tool to -the container. - -Capabilities as well as other configurations can be set in images via -environment variables. More information on valid variables can be found in the -[nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/docker-specialized.html) -documentation. These variables can be set in a Dockerfile. - -You can also use CUDA images, which set these variables automatically. See the -official [CUDA images](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda) -NGC catalog page. 
+For information on how to access NVIDIA GPUs from a container, see +[GPU access](gpu.md). diff --git a/content/manuals/engine/containers/runmetrics.md b/content/manuals/engine/containers/runmetrics.md index 22a9f3ba466..62142d3cace 100644 --- a/content/manuals/engine/containers/runmetrics.md +++ b/content/manuals/engine/containers/runmetrics.md @@ -27,7 +27,7 @@ redis1 0.07% 796 KB / 64 MB 1.21% redis2 0.07% 2.746 MB / 64 MB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B ``` -The [`docker stats`](/reference/cli/docker/container/stats.md) reference +The [`docker stats`](/reference/cli/docker/container/stats/) reference page has more details about the `docker stats` command. ## Control groups @@ -114,7 +114,7 @@ Note that the cgroup v2 mode behaves slightly different from the cgroup v1 mode: - The default cgroup driver (`dockerd --exec-opt native.cgroupdriver`) is `systemd` on v2, `cgroupfs` on v1. - The default cgroup namespace mode (`docker run --cgroupns`) is `private` on v2, `host` on v1. -- The `docker run` flags `--oom-kill-disable` and `--kernel-memory` are discarded on v2. +- The `docker run` flag `--oom-kill-disable` is discarded on v2. ### Find the cgroup for a given container @@ -200,74 +200,72 @@ indicates the number of page faults since the creation of the cgroup. `cache` : The amount of memory used by the processes of this control group that can be - associated precisely with a block on a block device. When you read from and - write to files on disk, this amount increases. This is the case if you use - "conventional" I/O (`open`, `read`, `write` syscalls) as well as mapped files - (with `mmap`). It also accounts for the memory used by `tmpfs` mounts, though - the reasons are unclear. +associated precisely with a block on a block device. When you read from and +write to files on disk, this amount increases. This is the case if you use +"conventional" I/O (`open`, `read`, `write` syscalls) as well as mapped files +(with `mmap`). It also accounts for the memory used by `tmpfs` mounts, though +the reasons are unclear. `rss` : The amount of memory that doesn't correspond to anything on disk: stacks, - heaps, and anonymous memory maps. +heaps, and anonymous memory maps. `mapped_file` : Indicates the amount of memory mapped by the processes in the control group. - It doesn't give you information about how much memory is used; it rather - tells you how it's used. +It doesn't give you information about how much memory is used; it rather +tells you how it's used. `pgfault`, `pgmajfault` : Indicate the number of times that a process of the cgroup triggered a "page - fault" and a "major fault", respectively. A page fault happens when a process - accesses a part of its virtual memory space which is nonexistent or protected. - The former can happen if the process is buggy and tries to access an invalid - address (it is sent a `SIGSEGV` signal, typically killing it with the famous - `Segmentation fault` message). The latter can happen when the process reads - from a memory zone which has been swapped out, or which corresponds to a mapped - file: in that case, the kernel loads the page from disk, and let the CPU - complete the memory access. It can also happen when the process writes to a - copy-on-write memory zone: likewise, the kernel preempts the process, duplicate - the memory page, and resume the write operation on the process's own copy of - the page. "Major" faults happen when the kernel actually needs to read the data - from disk. 
When it just duplicates an existing page, or allocate an empty page, - it's a regular (or "minor") fault. +fault" and a "major fault", respectively. A page fault happens when a process +accesses a virtual memory page that is not currently mapped to a physical +memory frame. This is a normal part of memory management. For example, a page +fault occurs when the process reads from a memory zone that has been swapped +out, or that corresponds to a memory-mapped file: in that case, the kernel +loads the page from disk and lets the CPU complete the memory access. It also +happens when the process writes to a copy-on-write memory zone: the kernel +duplicates the memory page and resumes the write operation on the process's +own copy of the page. "Major" faults happen when the kernel needs to read +data from disk. When it duplicates an existing page, or allocates an empty +page, it's a regular (or "minor") fault. `swap` : The amount of swap currently used by the processes in this cgroup. `active_anon`, `inactive_anon` : The amount of anonymous memory that has been identified has respectively - _active_ and _inactive_ by the kernel. "Anonymous" memory is the memory that is - _not_ linked to disk pages. In other words, that's the equivalent of the rss - counter described above. In fact, the very definition of the rss counter is - `active_anon` + `inactive_anon` - `tmpfs` (where tmpfs is the amount of - memory used up by `tmpfs` filesystems mounted by this control group). Now, - what's the difference between "active" and "inactive"? Pages are initially - "active"; and at regular intervals, the kernel sweeps over the memory, and tags - some pages as "inactive". Whenever they're accessed again, they're - immediately re-tagged "active". When the kernel is almost out of memory, and - time comes to swap out to disk, the kernel swaps "inactive" pages. +_active_ and _inactive_ by the kernel. "Anonymous" memory is the memory that is +_not_ linked to disk pages. In other words, that's the equivalent of the rss +counter described above. In fact, the very definition of the rss counter is +`active_anon` + `inactive_anon` - `tmpfs` (where tmpfs is the amount of +memory used up by `tmpfs` filesystems mounted by this control group). Now, +what's the difference between "active" and "inactive"? Pages are initially +"active"; and at regular intervals, the kernel sweeps over the memory, and tags +some pages as "inactive". Whenever they're accessed again, they're +immediately re-tagged "active". When the kernel is almost out of memory, and +time comes to swap out to disk, the kernel swaps "inactive" pages. `active_file`, `inactive_file` : Cache memory, with _active_ and _inactive_ similar to the _anon_ memory - above. The exact formula is `cache` = `active_file` + `inactive_file` + - `tmpfs`. The exact rules used by the kernel to move memory pages between - active and inactive sets are different from the ones used for anonymous memory, - but the general principle is the same. When the kernel needs to reclaim memory, - it's cheaper to reclaim a clean (=non modified) page from this pool, since it - can be reclaimed immediately (while anonymous pages and dirty/modified pages - need to be written to disk first). +above. The exact formula is `cache` = `active_file` + `inactive_file` + +`tmpfs`. The exact rules used by the kernel to move memory pages between +active and inactive sets are different from the ones used for anonymous memory, +but the general principle is the same. 
When the kernel needs to reclaim memory, +it's cheaper to reclaim a clean (=non modified) page from this pool, since it +can be reclaimed immediately (while anonymous pages and dirty/modified pages +need to be written to disk first). `unevictable` : The amount of memory that cannot be reclaimed; generally, it accounts for - memory that has been "locked" with `mlock`. It's often used by crypto - frameworks to make sure that secret keys and other sensitive material never - gets swapped out to disk. +memory that has been "locked" with `mlock`. It's often used by crypto +frameworks to make sure that secret keys and other sensitive material never +gets swapped out to disk. `memory_limit`, `memsw_limit` : These aren't really metrics, but a reminder of the limits applied to this - cgroup. The first one indicates the maximum amount of physical memory that can - be used by the processes of this control group; the second one indicates the - maximum amount of RAM+swap. +cgroup. The first one indicates the maximum amount of physical memory that can +be used by the processes of this control group; the second one indicates the +maximum amount of RAM+swap. Accounting for memory in the page cache is very complex. If two processes in different control groups both read the same file @@ -309,28 +307,28 @@ relevant ones: `blkio.sectors` : Contains the number of 512-bytes sectors read and written by the processes - member of the cgroup, device by device. Reads and writes are merged in a single - counter. +member of the cgroup, device by device. Reads and writes are merged in a single +counter. `blkio.io_service_bytes` : Indicates the number of bytes read and written by the cgroup. It has 4 - counters per device, because for each device, it differentiates between - synchronous vs. asynchronous I/O, and reads vs. writes. +counters per device, because for each device, it differentiates between +synchronous vs. asynchronous I/O, and reads vs. writes. `blkio.io_serviced` : The number of I/O operations performed, regardless of their size. It also has - 4 counters per device. +4 counters per device. `blkio.io_queued` : Indicates the number of I/O operations currently queued for this cgroup. In - other words, if the cgroup isn't doing any I/O, this is zero. The opposite is - not true. In other words, if there is no I/O queued, it doesn't mean that the - cgroup is idle (I/O-wise). It could be doing purely synchronous reads on an - otherwise quiescent device, which can therefore handle them immediately, - without queuing. Also, while it's helpful to figure out which cgroup is - putting stress on the I/O subsystem, keep in mind that it's a relative - quantity. Even if a process group doesn't perform more I/O, its queue size can - increase just because the device load increases because of other devices. +other words, if the cgroup isn't doing any I/O, this is zero. The opposite is +not true. In other words, if there is no I/O queued, it doesn't mean that the +cgroup is idle (I/O-wise). It could be doing purely synchronous reads on an +otherwise quiescent device, which can therefore handle them immediately, +without queuing. Also, while it's helpful to figure out which cgroup is +putting stress on the I/O subsystem, keep in mind that it's a relative +quantity. Even if a process group doesn't perform more I/O, its queue size can +increase just because the device load increases because of other devices. 
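To spot-check these counters for a single container without extra tooling, you can read the files straight from the container's cgroup. The sketch below assumes cgroup v1 with the `cgroupfs` driver, where container cgroups live under `/sys/fs/cgroup/<controller>/docker/<container-id>`, and uses `mycontainer` as a placeholder name; with the `systemd` driver or cgroup v2 the layout differs, so check the earlier section on finding the cgroup for a given container.

```console
$ CONTAINER_ID=$(docker inspect --format '{{.Id}}' mycontainer)

# Memory counters described above, one "name value" pair per line
$ cat "/sys/fs/cgroup/memory/docker/$CONTAINER_ID/memory.stat"

# Block I/O bytes per device, split by read/write and sync/async
$ cat "/sys/fs/cgroup/blkio/docker/$CONTAINER_ID/blkio.io_service_bytes"
```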
### Network metrics diff --git a/content/manuals/engine/containers/start-containers-automatically.md b/content/manuals/engine/containers/start-containers-automatically.md index 213ae635c54..c513208bf8f 100644 --- a/content/manuals/engine/containers/start-containers-automatically.md +++ b/content/manuals/engine/containers/start-containers-automatically.md @@ -10,7 +10,7 @@ aliases: - /config/containers/start-containers-automatically/ --- -Docker provides [restart policies](/manuals/engine/containers/run.md#restart-policies---restart) +Docker provides [restart policies](/reference/cli/docker/container/run/#restart) to control whether your containers start automatically when they exit, or when Docker restarts. Restart policies start linked containers in the correct order. Docker recommends that you use restart policies, and avoid using process @@ -22,7 +22,7 @@ a Docker upgrade, though networking and user input are interrupted. ## Use a restart policy -To configure the restart policy for a container, use the `--restart` flag +To configure the restart policy for a container, use the [`--restart`](/reference/cli/docker/container/run/#restart) flag when using the `docker run` command. The value of the `--restart` flag can be any of the following: @@ -68,7 +68,7 @@ Keep the following in mind when using restart policies: - Restart policies only apply to containers. To configure restart policies for Swarm services, see - [flags related to service restart](/reference/cli/docker/service/create.md). + [flags related to service restart](/reference/cli/docker/service/create/). ### Restarting foreground containers diff --git a/content/manuals/engine/daemon/_index.md b/content/manuals/engine/daemon/_index.md index 3b1ee08307a..32fcab96287 100644 --- a/content/manuals/engine/daemon/_index.md +++ b/content/manuals/engine/daemon/_index.md @@ -98,13 +98,24 @@ The Docker daemon persists all data in a single directory. This tracks everything related to Docker, including containers, images, volumes, service definition, and secrets. -By default this directory is: +By default the daemon stores data in: -- `/var/lib/docker` on Linux. -- `C:\ProgramData\docker` on Windows. +- `/var/lib/docker` on Linux +- `C:\ProgramData\docker` on Windows -You can configure the Docker daemon to use a different directory, using the -`data-root` configuration option. For example: +When using the [containerd image store](/manuals/engine/storage/containerd.md) +(the default for Docker Engine 29.0 and later on fresh installations), image +contents and container snapshots are stored in `/var/lib/containerd`. Other +daemon data (volumes, configs) remains in `/var/lib/docker`. + +When using [classic storage drivers](/manuals/engine/storage/drivers/_index.md) +like `overlay2` (the default for upgraded installations), all data is stored in +`/var/lib/docker`. + +### Configure the data directory location + +You can configure the Docker daemon to use a different storage directory using +the `data-root` configuration option. ```json { @@ -112,10 +123,19 @@ You can configure the Docker daemon to use a different directory, using the } ``` -Since the state of a Docker daemon is kept on this directory, make sure you use -a dedicated directory for each daemon. If two daemons share the same directory, -for example, an NFS share, you are going to experience errors that are difficult -to troubleshoot. +The `data-root` option does not affect image and container data stored in +`/var/lib/containerd` when using the containerd image store. 
To change the +storage location of containerd snapshotters, use the system containerd +configuration file: + +```toml {title="/etc/containerd/config.toml"} +version = 2 +root = "/mnt/containerd-data" +``` + +Make sure you use a dedicated directory for each daemon. If two daemons share +the same directory, for example an NFS share, you will experience errors that +are difficult to troubleshoot. ## Next steps diff --git a/content/manuals/engine/daemon/ipv6.md b/content/manuals/engine/daemon/ipv6.md index 1ed5ca00b05..df7041aab87 100644 --- a/content/manuals/engine/daemon/ipv6.md +++ b/content/manuals/engine/daemon/ipv6.md @@ -35,6 +35,13 @@ IPv6 is only supported on Docker daemons running on Linux hosts. - subnet: 2001:db8::/64 ``` +> [!NOTE] +> +> The address `2001:db8::/64` in these examples is +> [reserved for use in documentation][wikipedia-ipv6-reserved]. +> Replace it with a valid IPv6 network, for example a +> [Unique Local Address (ULA)][wikipedia-ipv6-ula] subnet from `fd00::/8`. + You can now run containers that attach to the `ip6net` network. ```console @@ -74,6 +81,13 @@ The following steps show you how to use IPv6 on the default bridge network. } ``` + > [!NOTE] + > + > The address `2001:db8:1::/64` in this example is + > [reserved for use in documentation][wikipedia-ipv6-reserved]. + > Replace it with a valid IPv6 network, for example a + > [Unique Local Address (ULA)][wikipedia-ipv6-ula] subnet from `fd00::/8`. + - `ipv6` enables IPv6 networking on the default network. - `fixed-cidr-v6` assigns a subnet to the default bridge network, enabling dynamic IPv6 address allocation. @@ -126,42 +140,24 @@ will be used when IPv6 is enabled. These `/64` subnets include a 40-bit Global ID based on the Docker Engine's randomly generated ID, to give a high probability of uniqueness. +The built-in default address pool configuration is shown in [Subnet allocation](../network/_index.md#subnet-allocation). +It does not include any IPv6 pools. + To use different pools of IPv6 subnets for dynamic address allocation, you must manually configure address pools of the daemon to include: - The default IPv4 address pools - One or more IPv6 pools of your own -The default address pool configuration is: +The following example shows a valid configuration with IPv4 and IPv6 pools, +both pools provide 256 subnets. IPv4 subnets with prefix length `/24` will +be allocated from a `/16` pool. IPv6 subnets with prefix length `/64` will +be allocated from a `/56` pool. ```json { "default-address-pools": [ - { "base": "172.17.0.0/16", "size": 16 }, - { "base": "172.18.0.0/16", "size": 16 }, - { "base": "172.19.0.0/16", "size": 16 }, - { "base": "172.20.0.0/14", "size": 16 }, - { "base": "172.24.0.0/14", "size": 16 }, - { "base": "172.28.0.0/14", "size": 16 }, - { "base": "192.168.0.0/16", "size": 20 } - ] -} -``` - -The following example shows a valid configuration with the default values and -an IPv6 pool. The IPv6 pool in the example provides up to 256 IPv6 subnets of -size `/64`, from an IPv6 pool of prefix length `/56`. 
- -```json -{ - "default-address-pools": [ - { "base": "172.17.0.0/16", "size": 16 }, - { "base": "172.18.0.0/16", "size": 16 }, - { "base": "172.19.0.0/16", "size": 16 }, - { "base": "172.20.0.0/14", "size": 16 }, - { "base": "172.24.0.0/14", "size": 16 }, - { "base": "172.28.0.0/14", "size": 16 }, - { "base": "192.168.0.0/16", "size": 20 }, + { "base": "172.17.0.0/16", "size": 24 }, { "base": "2001:db8::/56", "size": 64 } ] } @@ -176,6 +172,9 @@ size `/64`, from an IPv6 pool of prefix length `/56`. > The default IPv4 pools are from the private address range, > similar to the default IPv6 [ULA][wikipedia-ipv6-ula] networks. +See [Subnet allocation](../network/_index.md#subnet-allocation) for more information about +`default-address-pools`. + [wikipedia-ipv6-reserved]: https://en.wikipedia.org/wiki/Reserved_IP_addresses#IPv6 [wikipedia-ipv6-ula]: https://en.wikipedia.org/wiki/Unique_local_address diff --git a/content/manuals/engine/daemon/live-restore.md b/content/manuals/engine/daemon/live-restore.md index 264afacf5c4..ab27f50ec50 100644 --- a/content/manuals/engine/daemon/live-restore.md +++ b/content/manuals/engine/daemon/live-restore.md @@ -4,8 +4,9 @@ keywords: docker, upgrade, daemon, dockerd, live-restore, daemonless container title: Live restore weight: 40 aliases: - - /engine/admin/live-restore/ - /config/containers/live-restore/ + - /engine/admin/live-restore/ + - /engine/containers/live-restore/ --- By default, when the Docker daemon terminates, it shuts down running containers. diff --git a/content/manuals/engine/daemon/logs.md b/content/manuals/engine/daemon/logs.md index 0b09f3e8e3b..e5f81082094 100644 --- a/content/manuals/engine/daemon/logs.md +++ b/content/manuals/engine/daemon/logs.md @@ -1,7 +1,7 @@ --- title: Read the daemon logs -description: How to read the event logs for the Docker daemon -keywords: docker, daemon, configuration, troubleshooting, logging +description: How to read Docker daemon logs and force a stack trace using SIGUSR1 for debugging +keywords: docker, daemon, configuration, troubleshooting, logging, debug, stack trace, SIGUSR1, signal, goroutine dump, crash diagnostics aliases: - /config/daemon/logs/ --- @@ -10,31 +10,33 @@ The daemon logs may help you diagnose problems. 
The logs may be saved in one of a few locations, depending on the operating system configuration and the logging subsystem used: -| Operating system | Location | -| :--------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------- | -| Linux | Use the command `journalctl -xu docker.service` (or read `/var/log/syslog` or `/var/log/messages`, depending on your Linux Distribution) | -| macOS (`dockerd` logs) | `~/Library/Containers/com.docker.docker/Data/log/vm/dockerd.log` | -| macOS (`containerd` logs) | `~/Library/Containers/com.docker.docker/Data/log/vm/containerd.log` | -| Windows (WSL2) (`dockerd` logs) | `%LOCALAPPDATA%\Docker\log\vm\dockerd.log` | -| Windows (WSL2) (`containerd` logs) | `%LOCALAPPDATA%\Docker\log\vm\containerd.log` | -| Windows (Windows containers) | Logs are in the Windows Event Log | +| Operating system | Location | +| :--------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------- | +| Linux | Use the command `journalctl -xu docker.service` (or read `/var/log/syslog` or `/var/log/messages`, depending on your Linux Distribution) | +| macOS (Docker Desktop) | `~/Library/Containers/com.docker.docker/Data/log/vm/init.log` | +| Windows (WSL2) | `%LOCALAPPDATA%\Docker\log\vm\init.log` | +| Windows (Windows containers) | Logs are in the Windows Event Log | -To view the `dockerd` logs on macOS, open a terminal Window, and use the `tail` -command with the `-f` flag to "follow" the logs. Logs will be printed until you -terminate the command using `CTRL+c`: +On macOS and Windows (WSL2), Docker Desktop writes daemon logs (`dockerd`, +`containerd`, and other VM services) to a single multiplexed `init.log` file +in JSON format. Each line contains a `"component"` field identifying the +service. To follow the logs, open a terminal and use the `tail` command with +the `-f` flag. Logs print until you terminate the command using `CTRL+c`: ```console -$ tail -f ~/Library/Containers/com.docker.docker/Data/log/vm/dockerd.log -2021-07-28T10:21:21Z dockerd time="2021-07-28T10:21:21.497642089Z" level=debug msg="attach: stdout: begin" -2021-07-28T10:21:21Z dockerd time="2021-07-28T10:21:21.497714291Z" level=debug msg="attach: stderr: begin" -2021-07-28T10:21:21Z dockerd time="2021-07-28T10:21:21.499798390Z" level=debug msg="Calling POST /v1.41/containers/35fc5ec0ffe1ad492d0a4fbf51fd6286a087b89d4dd66367fa3b7aec70b46a40/wait?condition=removed" -2021-07-28T10:21:21Z dockerd time="2021-07-28T10:21:21.518403686Z" level=debug msg="Calling GET /v1.41/containers/35fc5ec0ffe1ad492d0a4fbf51fd6286a087b89d4dd66367fa3b7aec70b46a40/json" -2021-07-28T10:21:21Z dockerd time="2021-07-28T10:21:21.527074928Z" level=debug msg="Calling POST /v1.41/containers/35fc5ec0ffe1ad492d0a4fbf51fd6286a087b89d4dd66367fa3b7aec70b46a40/start" -2021-07-28T10:21:21Z dockerd time="2021-07-28T10:21:21.528203579Z" level=debug msg="container mounted via layerStore: &{/var/lib/docker/overlay2/6e76ffecede030507fcaa576404e141e5f87fc4d7e1760e9ce5b52acb24 +$ tail -f ~/Library/Containers/com.docker.docker/Data/log/vm/init.log +{"component":"dockerd","level":"debug","msg":"attach: stdout: begin","time":"2021-07-28T10:21:21.497642089Z"} +{"component":"dockerd","level":"debug","msg":"attach: stderr: begin","time":"2021-07-28T10:21:21.497714291Z"} ... 
^C ``` +To filter for `dockerd` output only: + +```console +$ grep '"component":"dockerd"' ~/Library/Containers/com.docker.docker/Data/log/vm/init.log +``` + ## Enable debugging There are two ways to enable debugging. The recommended approach is to set the @@ -117,7 +119,7 @@ The Docker daemon log can be viewed by using one of the following methods: Look in the Docker logs for a message like the following: -```none +```text ...goroutine stacks written to /var/run/docker/goroutine-stacks-2017-06-02T193336z.log ``` diff --git a/content/manuals/engine/daemon/proxy.md b/content/manuals/engine/daemon/proxy.md index d2a4a7ebfe2..5f3ff0f609f 100644 --- a/content/manuals/engine/daemon/proxy.md +++ b/content/manuals/engine/daemon/proxy.md @@ -45,7 +45,7 @@ or using CLI flags for the `--http-proxy` or `--https-proxy` flags for the { "proxies": { "http-proxy": "http://proxy.example.com:3128", - "https-proxy": "https://proxy.example.com:3129", + "https-proxy": "http://proxy.example.com:3128", "no-proxy": "*.test.example.com,.example.org,127.0.0.0/8" } } @@ -101,21 +101,21 @@ systemd drop-in file that sets the variables for the `docker` service. Environment="HTTP_PROXY=http://proxy.example.com:3128" ``` - If you are behind an HTTPS proxy server, set the `HTTPS_PROXY` environment + To proxy HTTPS requests, set the `HTTPS_PROXY` environment variable: ```systemd [Service] - Environment="HTTPS_PROXY=https://proxy.example.com:3129" + Environment="HTTPS_PROXY=http://proxy.example.com:3128" ``` - Multiple environment variables can be set; to set both a non-HTTPS and a - HTTPs proxy; + Multiple environment variables can be set; to set both an HTTP and an + HTTPS proxy; ```systemd [Service] Environment="HTTP_PROXY=http://proxy.example.com:3128" - Environment="HTTPS_PROXY=https://proxy.example.com:3129" + Environment="HTTPS_PROXY=http://proxy.example.com:3128" ``` > [!NOTE] @@ -134,7 +134,6 @@ systemd drop-in file that sets the variables for the `docker` service. The `NO_PROXY` variable specifies a string that contains comma-separated values for hosts that should be excluded from proxying. These are the options you can specify to exclude hosts: - - IP address prefix (`1.2.3.4`) - Domain name, or a special DNS label (`*`) - A domain name matches that name and all subdomains. A domain name with a @@ -151,7 +150,7 @@ systemd drop-in file that sets the variables for the `docker` service. ```systemd [Service] Environment="HTTP_PROXY=http://proxy.example.com:3128" - Environment="HTTPS_PROXY=https://proxy.example.com:3129" + Environment="HTTPS_PROXY=http://proxy.example.com:3128" Environment="NO_PROXY=localhost,127.0.0.1,docker-registry.example.com,.corp" ``` @@ -168,7 +167,7 @@ systemd drop-in file that sets the variables for the `docker` service. ```console $ sudo systemctl show --property=Environment docker - Environment=HTTP_PROXY=http://proxy.example.com:3128 HTTPS_PROXY=https://proxy.example.com:3129 NO_PROXY=localhost,127.0.0.1,docker-registry.example.com,.corp + Environment=HTTP_PROXY=http://proxy.example.com:3128 HTTPS_PROXY=http://proxy.example.com:3128 NO_PROXY=localhost,127.0.0.1,docker-registry.example.com,.corp ``` {{< /tab >}} @@ -188,21 +187,21 @@ systemd drop-in file that sets the variables for the `docker` service. 
Environment="HTTP_PROXY=http://proxy.example.com:3128" ``` - If you are behind an HTTPS proxy server, set the `HTTPS_PROXY` environment + To proxy HTTPS requests, set the `HTTPS_PROXY` environment variable: ```systemd [Service] - Environment="HTTPS_PROXY=https://proxy.example.com:3129" + Environment="HTTPS_PROXY=http://proxy.example.com:3128" ``` - Multiple environment variables can be set; to set both a non-HTTPS and a - HTTPs proxy; + Multiple environment variables can be set; to set both an HTTP and an + HTTPS proxy; ```systemd [Service] Environment="HTTP_PROXY=http://proxy.example.com:3128" - Environment="HTTPS_PROXY=https://proxy.example.com:3129" + Environment="HTTPS_PROXY=http://proxy.example.com:3128" ``` > [!NOTE] @@ -221,7 +220,6 @@ systemd drop-in file that sets the variables for the `docker` service. The `NO_PROXY` variable specifies a string that contains comma-separated values for hosts that should be excluded from proxying. These are the options you can specify to exclude hosts: - - IP address prefix (`1.2.3.4`) - Domain name, or a special DNS label (`*`) - A domain name matches that name and all subdomains. A domain name with a @@ -238,7 +236,7 @@ systemd drop-in file that sets the variables for the `docker` service. ```systemd [Service] Environment="HTTP_PROXY=http://proxy.example.com:3128" - Environment="HTTPS_PROXY=https://proxy.example.com:3129" + Environment="HTTPS_PROXY=http://proxy.example.com:3128" Environment="NO_PROXY=localhost,127.0.0.1,docker-registry.example.com,.corp" ``` @@ -255,7 +253,7 @@ systemd drop-in file that sets the variables for the `docker` service. ```console $ systemctl --user show --property=Environment docker - Environment=HTTP_PROXY=http://proxy.example.com:3128 HTTPS_PROXY=https://proxy.example.com:3129 NO_PROXY=localhost,127.0.0.1,docker-registry.example.com,.corp + Environment=HTTP_PROXY=http://proxy.example.com:3128 HTTPS_PROXY=http://proxy.example.com:3128 NO_PROXY=localhost,127.0.0.1,docker-registry.example.com,.corp ``` {{< /tab >}} diff --git a/content/manuals/engine/daemon/troubleshoot.md b/content/manuals/engine/daemon/troubleshoot.md index 7b68c88fb04..770b2db6179 100644 --- a/content/manuals/engine/daemon/troubleshoot.md +++ b/content/manuals/engine/daemon/troubleshoot.md @@ -545,7 +545,7 @@ all other running containers as filesystems within the container which mounts `/var/lib/docker/`. 
When you attempt to remove any of these containers, the removal attempt may fail with an error like the following: -```none +```text Error: Unable to remove filesystem for 74bef250361c7817bee19349c93139621b272bc8f654ae112dd4eb9652af9515: remove /var/lib/docker/containers/74bef250361c7817bee19349c93139621b272bc8f654ae112dd4eb9652af9515/shm: diff --git a/content/manuals/engine/install/_index.md b/content/manuals/engine/install/_index.md index cf03b08c40a..5b30fd279b6 100644 --- a/content/manuals/engine/install/_index.md +++ b/content/manuals/engine/install/_index.md @@ -31,6 +31,21 @@ aliases: - /engine/installation/oracle/ - /enterprise/supported-platforms/ - /install/linux/docker-ee/oracle/ +- /ee/docker-ee/sles/ +- /ee/docker-ee/suse/ +- /engine/installation/linux/docker-ce/sles/ +- /engine/installation/linux/docker-ee/sles/ +- /engine/installation/linux/docker-ee/suse/ +- /engine/installation/linux/sles/ +- /engine/installation/linux/SUSE/ +- /engine/installation/linux/suse/ +- /engine/installation/sles/ +- /engine/installation/SUSE/ +- /install/linux/docker-ce/sles/ +- /install/linux/docker-ee/sles/ +- /install/linux/docker-ee/suse/ +- /install/linux/sles/ +- /installation/sles/ --- This section describes how to install Docker Engine on Linux, also known as @@ -38,16 +53,17 @@ Docker CE. Docker Engine is also available for Windows, macOS, and Linux, through Docker Desktop. For instructions on how to install Docker Desktop, see: [Overview of Docker Desktop](/manuals/desktop/_index.md). -## Supported platforms +## Installation procedures for supported platforms + +Click on a platform's link to view the relevant installation procedure. | Platform | x86_64 / amd64 | arm64 / aarch64 | arm (32-bit) | ppc64le | s390x | | :--------------------------------------------- | :------------: | :-------------: | :----------: | :-----: | :---: | | [CentOS](centos.md) | ✅ | ✅ | | ✅ | | | [Debian](debian.md) | ✅ | ✅ | ✅ | ✅ | | | [Fedora](fedora.md) | ✅ | ✅ | | ✅ | | -| [Raspberry Pi OS (32-bit)](raspberry-pi-os.md) | | | ✅ | | | +| [Raspberry Pi OS (32-bit)](raspberry-pi-os.md) | | | ⚠️ | | | | [RHEL](rhel.md) | ✅ | ✅ | | | ✅ | -| [SLES](sles.md) | | | | | ✅ | | [Ubuntu](ubuntu.md) | ✅ | ✅ | ✅ | ✅ | ✅ | | [Binaries](binaries.md) | ✅ | ✅ | ✅ | | | @@ -58,7 +74,7 @@ see: [Overview of Docker Desktop](/manuals/desktop/_index.md). > While the following instructions may work, Docker doesn't test or verify > installation on distribution derivatives. -- If you use Debian derivatives such as "BunsenLabs Linux", "Kali Linux" or +- If you use Debian derivatives such as "BunsenLabs Linux", "Kali Linux" or "LMDE" (Debian-based Mint) should follow the installation instructions for [Debian](debian.md), substitute the version of your distribution for the corresponding Debian release. Refer to the documentation of your distribution to find @@ -105,9 +121,10 @@ Patch releases are always backward compatible with its major and minor version. ### Licensing -Docker Engine is licensed under the Apache License, Version 2.0. See -[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full -license text. +Commercial use of Docker Engine obtained via Docker Desktop +within larger enterprises (exceeding 250 employees OR with annual revenue surpassing +$10 million USD), requires a [paid subscription](https://www.docker.com/pricing?ref=Docs&refAction=DocsEngineInstall). +Apache License, Version 2.0. See [LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full license. 
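To confirm which Engine version a host is actually running, one quick check is to query the daemon for its reported version; the client and daemon versions can differ, so plain `docker version` shows both.

```console
$ docker version --format '{{.Server.Version}}'
```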
## Reporting security issues diff --git a/content/manuals/engine/install/centos.md b/content/manuals/engine/install/centos.md index d01ae2aaef4..90d6212e97b 100644 --- a/content/manuals/engine/install/centos.md +++ b/content/manuals/engine/install/centos.md @@ -29,7 +29,8 @@ To get started with Docker Engine on CentOS, make sure you To install Docker Engine, you need a maintained version of one of the following CentOS versions: -- CentOS 9 (stream) +- CentOS Stream 10 +- CentOS Stream 9 The `centos-extras` repository must be enabled. This repository is enabled by default. If you have disabled it, you need to re-enable it. @@ -75,6 +76,8 @@ You can install Docker Engine in different ways, depending on your needs: - In testing and development environments, you can use automated [convenience scripts](#install-using-the-convenience-script) to install Docker. +{{% include "engine-license.md" %}} + ### Install using the rpm repository {#install-using-the-repository} Before you install Docker Engine for the first time on a new host machine, you diff --git a/content/manuals/engine/install/debian.md b/content/manuals/engine/install/debian.md index 0ca59c2490a..5ddc7d1f131 100644 --- a/content/manuals/engine/install/debian.md +++ b/content/manuals/engine/install/debian.md @@ -39,15 +39,14 @@ To get started with Docker Engine on Debian, make sure you ### OS requirements -To install Docker Engine, you need the 64-bit version of one of these Debian -versions: +To install Docker Engine, you need one of these Debian versions: -- Debian Trixie 13 (testing) -- Debian Bookworm 12 (stable) -- Debian Bullseye 11 (oldstable) +- Debian Trixie 13 (stable) +- Debian Bookworm 12 (oldstable) +- Debian Bullseye 11 (oldoldstable) -Docker Engine for Debian is compatible with x86_64 (or amd64), armhf, arm64, -and ppc64le (ppc64el) architectures. +Docker Engine for Debian is compatible with x86_64 (or amd64), armhf (arm/v7), +arm64, and ppc64le (ppc64el) architectures. ### Uninstall old versions @@ -72,10 +71,10 @@ conflicts with the versions bundled with Docker Engine. Run the following command to uninstall all conflicting packages: ```console -$ for pkg in docker.io docker-doc docker-compose podman-docker containerd runc; do sudo apt-get remove $pkg; done +$ sudo apt remove $(dpkg --get-selections docker.io docker-compose docker-doc podman-docker containerd runc | cut -f1) ``` -`apt-get` might report that you have none of these packages installed. +`apt` might report that you have none of these packages installed. Images, containers, volumes, and networks stored in `/var/lib/docker/` aren't automatically removed when you uninstall Docker. If you want to start with a @@ -98,6 +97,8 @@ You can install Docker Engine in different ways, depending on your needs: - Use a [convenience script](#install-using-the-convenience-script). Only recommended for testing and development environments. +{{% include "engine-license.md" %}} + ### Install using the `apt` repository {#install-using-the-repository} Before you install Docker Engine for the first time on a new host machine, you @@ -108,23 +109,28 @@ Docker from the repository. 
```bash # Add Docker's official GPG key: - sudo apt-get update - sudo apt-get install ca-certificates curl + sudo apt update + sudo apt install ca-certificates curl sudo install -m 0755 -d /etc/apt/keyrings sudo curl -fsSL {{% param "download-url-base" %}}/gpg -o /etc/apt/keyrings/docker.asc sudo chmod a+r /etc/apt/keyrings/docker.asc # Add the repository to Apt sources: - echo \ - "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] {{% param "download-url-base" %}} \ - $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ - sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - sudo apt-get update + sudo tee /etc/apt/sources.list.d/docker.sources < [!NOTE] > - > If you use a derivative distribution, such as Kali Linux, + > If you use Debian testing or a derivative distribution such as Kali Linux, > you may need to substitute the part of this command that's expected to > print the version codename: > @@ -133,7 +139,7 @@ Docker from the repository. > ``` > > Replace this part with the codename of the corresponding Debian release, - > such as `bookworm`. + > such as `trixie`. 2. Install the Docker packages. @@ -143,7 +149,7 @@ Docker from the repository. To install the latest version, run: ```console - $ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + $ sudo apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin ``` {{< /tab >}} @@ -153,11 +159,10 @@ Docker from the repository. available versions in the repository: ```console - # List the available versions: - $ apt-cache madison docker-ce | awk '{ print $3 }' + $ apt list --all-versions docker-ce - 5:{{% param "docker_ce_version" %}}-1~debian.12~bookworm - 5:{{% param "docker_ce_version_prev" %}}-1~debian.12~bookworm + docker-ce/bookworm 5:{{% param "docker_ce_version" %}}-1~debian.12~bookworm + docker-ce/bookworm 5:{{% param "docker_ce_version_prev" %}}-1~debian.12~bookworm ... ``` @@ -165,12 +170,26 @@ Docker from the repository. ```console $ VERSION_STRING=5:{{% param "docker_ce_version" %}}-1~debian.12~bookworm - $ sudo apt-get install docker-ce=$VERSION_STRING docker-ce-cli=$VERSION_STRING containerd.io docker-buildx-plugin docker-compose-plugin + $ sudo apt install docker-ce=$VERSION_STRING docker-ce-cli=$VERSION_STRING containerd.io docker-buildx-plugin docker-compose-plugin ``` {{< /tab >}} {{< /tabs >}} + > [!NOTE] + > + > After installation, verify that Docker is running: + > + > ```console + > $ sudo systemctl status docker + > ``` + > + > If Docker is not running, start it manually: + > + > ```console + > $ sudo systemctl start docker + > ``` + 3. Verify that the installation is successful by running the `hello-world` image: ```console @@ -224,12 +243,23 @@ download a new file each time you want to upgrade Docker Engine. ./docker-compose-plugin__.deb ``` - The Docker daemon starts automatically. + > [!NOTE] + > + > After installation, verify that Docker is running: + > + > ```console + > $ sudo systemctl status docker + > ``` + > + > If Docker is not running, start it manually: + > + > ```console + > $ sudo systemctl start docker + > ``` 6. Verify that the installation is successful by running the `hello-world` image: ```console - $ sudo service docker start $ sudo docker run hello-world ``` @@ -252,7 +282,7 @@ To upgrade Docker Engine, download the newer package files and repeat the 1. 
Uninstall the Docker Engine, CLI, containerd, and Docker Compose packages: ```console - $ sudo apt-get purge docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras + $ sudo apt purge docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras ``` 2. Images, containers, volumes, or custom configuration files on your host @@ -266,7 +296,7 @@ To upgrade Docker Engine, download the newer package files and repeat the 3. Remove source list and keyrings ```console - $ sudo rm /etc/apt/sources.list.d/docker.list + $ sudo rm /etc/apt/sources.list.d/docker.sources $ sudo rm /etc/apt/keyrings/docker.asc ``` diff --git a/content/manuals/engine/install/fedora.md b/content/manuals/engine/install/fedora.md index ac936854c18..205143baa67 100644 --- a/content/manuals/engine/install/fedora.md +++ b/content/manuals/engine/install/fedora.md @@ -26,9 +26,9 @@ To get started with Docker Engine on Fedora, make sure you To install Docker Engine, you need a maintained version of one of the following Fedora versions: +- Fedora 44 +- Fedora 43 - Fedora 42 -- Fedora 41 -- Fedora 40 ### Uninstall old versions @@ -73,6 +73,8 @@ You can install Docker Engine in different ways, depending on your needs: - In testing and development environments, you can use automated [convenience scripts](#install-using-the-convenience-script) to install Docker. +{{% include "engine-license.md" %}} + ### Install using the rpm repository {#install-using-the-repository} Before you install Docker Engine for the first time on a new host machine, you @@ -81,12 +83,8 @@ Docker from the repository. #### Set up the repository -Install the `dnf-plugins-core` package (which provides the commands to manage -your DNF repositories) and set up the repository. - ```console -$ sudo dnf -y install dnf-plugins-core -$ sudo dnf-3 config-manager --add-repo {{% param "download-url-base" %}}/docker-ce.repo +$ sudo dnf config-manager addrepo --from-repofile {{% param "download-url-base" %}}/docker-ce.repo ``` #### Install Docker Engine @@ -152,6 +150,17 @@ $ sudo dnf-3 config-manager --add-repo {{% param "download-url-base" %}}/docker- boot your system. If you don't want Docker to start automatically, use `sudo systemctl start docker` instead. + > [!NOTE] + > + > If the Docker service fails to start and `journalctl -u docker` + > shows `failed to find iptables`, point the `iptables` command to + > `iptables-nft` using `alternatives` and restart the service: + > + > ```console + > $ sudo alternatives --set iptables /usr/bin/iptables-nft + > $ sudo systemctl restart docker + > ``` + 3. Verify that the installation is successful by running the `hello-world` image: ```console @@ -201,6 +210,17 @@ download a new file each time you want to upgrade Docker Engine. boot your system. If you don't want Docker to start automatically, use `sudo systemctl start docker` instead. + > [!NOTE] + > + > If the Docker service fails to start and `journalctl -u docker` + > shows `failed to find iptables`, point the `iptables` command to + > `iptables-nft` using `alternatives` and restart the service: + > + > ```console + > $ sudo alternatives --set iptables /usr/bin/iptables-nft + > $ sudo systemctl restart docker + > ``` + 4. 
Verify that the installation is successful by running the `hello-world` image: ```console diff --git a/content/manuals/engine/install/linux-postinstall.md b/content/manuals/engine/install/linux-postinstall.md index 443185d632f..f82fe4b6117 100644 --- a/content/manuals/engine/install/linux-postinstall.md +++ b/content/manuals/engine/install/linux-postinstall.md @@ -78,7 +78,7 @@ To create the `docker` group and add your user: If you initially ran Docker CLI commands using `sudo` before adding your user to the `docker` group, you may see the following error: - ```none + ```text WARNING: Error loading config file: /home/user/.docker/config.json - stat /home/user/.docker/config.json: permission denied ``` diff --git a/content/manuals/engine/install/raspberry-pi-os.md b/content/manuals/engine/install/raspberry-pi-os.md index 1399c73ce07..0593541a519 100644 --- a/content/manuals/engine/install/raspberry-pi-os.md +++ b/content/manuals/engine/install/raspberry-pi-os.md @@ -1,10 +1,10 @@ --- description: Learn how to install Docker Engine on a 32-bit Raspberry Pi OS system. These instructions cover - the different installation methods, how to uninstall, and next steps. + the different installation methods, how to uninstall, and next steps. Note that 32-bit support will be deprecated in Docker Engine v29 and later. keywords: requirements, apt, installation, install docker engine, Raspberry Pi OS, install, uninstall, upgrade, - update -title: Install Docker Engine on Raspberry Pi OS (32-bit) -linkTitle: Raspberry Pi OS (32-bit) + update, deprecated +title: Install Docker Engine on Raspberry Pi OS (32-bit / armhf) +linkTitle: Raspberry Pi OS (32-bit / armhf) weight: 50 toc_max: 4 aliases: @@ -13,6 +13,22 @@ aliases: download-url-base: https://download.docker.com/linux/raspbian --- +> [!WARNING] +> +> **Raspberry Pi OS 32-bit (armhf) Deprecation** +> +> Docker Engine v28 will be the last major version to support Raspberry Pi OS 32-bit (armhf). +> Starting with Docker Engine v29, new major versions will no longer provide packages for Raspberry Pi OS 32-bit (armhf). +> +> **Migration options** +> - **64-bit ARM:** Install the Debian `arm64` packages (fully supported). See the +> [Debian installation instructions](debian.md). +> - **32-bit ARM (v7):** Install the Debian `armhf` packages (targets ARMv7 CPUs). +> +> **Note:** Older devices based on the ARMv6 architecture are no longer supported by official packages, including: +> - Raspberry Pi 1 (Model A/B/A+/B+) +> - Raspberry Pi Zero and Zero W + To get started with Docker Engine on Raspberry Pi OS, make sure you [meet the prerequisites](#prerequisites), and then follow the [installation steps](#installation-methods). @@ -49,6 +65,18 @@ To install Docker Engine, you need one of the following OS versions: - 32-bit Raspberry Pi OS Bookworm 12 (stable) - 32-bit Raspberry Pi OS Bullseye 11 (oldstable) +> [!WARNING] +> +> Docker Engine v28 is the last major version to support Raspberry Pi OS 32-bit (armhf). Starting with v29, +> no new packages will be provided for 32-bit Raspberry Pi OS. +> +> Migration options: +> - 64-bit ARM: use Debian `arm64` packages; see the [Debian installation instructions](debian.md). +> - 32-bit ARM (v7): use Debian `armhf` packages (targets ARMv7 CPUs). +> +> Note: ARMv6-based devices (Raspberry Pi 1 models and Raspberry Pi Zero/Zero W) are not supported by +> official packages. + ### Uninstall old versions Before you can install Docker Engine, you need to uninstall any conflicting packages. 
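Raspberry Pi OS is Debian-based, so the conflicting packages are the same distro-provided ones listed for Debian. A typical removal, assuming those standard package names, looks like the following; `apt-get` may report that some of them aren't installed:

```console
$ for pkg in docker.io docker-doc docker-compose podman-docker containerd runc; do sudo apt-get remove $pkg; done
```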
@@ -98,6 +126,8 @@ You can install Docker Engine in different ways, depending on your needs: - Use a [convenience script](#install-using-the-convenience-script). Only recommended for testing and development environments. +{{% include "engine-license.md" %}} + ### Install using the `apt` repository {#install-using-the-repository} Before you install Docker Engine for the first time on a new host machine, you @@ -132,7 +162,7 @@ Docker from the repository. ```console $ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin ``` - + {{< /tab >}} {{< tab name="Specific version" >}} @@ -158,6 +188,20 @@ Docker from the repository. {{< /tab >}} {{< /tabs >}} + > [!NOTE] + > + > After installation, verify that Docker is running: + > + > ```console + > $ sudo systemctl status docker + > ``` + > + > If Docker is not running, start it manually: + > + > ```console + > $ sudo systemctl start docker + > ``` + 3. Verify that the installation is successful by running the `hello-world` image: ```console @@ -211,12 +255,23 @@ download a new file each time you want to upgrade Docker Engine. ./docker-compose-plugin__.deb ``` - The Docker daemon starts automatically. + > [!NOTE] + > + > After installation, verify that Docker is running: + > + > ```console + > $ sudo systemctl status docker + > ``` + > + > If Docker is not running, start it manually: + > + > ```console + > $ sudo systemctl start docker + > ``` 6. Verify that the installation is successful by running the `hello-world` image: ```console - $ sudo service docker start $ sudo docker run hello-world ``` diff --git a/content/manuals/engine/install/rhel.md b/content/manuals/engine/install/rhel.md index f76d01be5ce..72c6f61540d 100644 --- a/content/manuals/engine/install/rhel.md +++ b/content/manuals/engine/install/rhel.md @@ -31,6 +31,7 @@ RHEL versions: - RHEL 8 - RHEL 9 +- RHEL 10 ### Uninstall old versions @@ -75,6 +76,8 @@ You can install Docker Engine in different ways, depending on your needs: - In testing and development environments, you can use automated [convenience scripts](#install-using-the-convenience-script) to install Docker. +{{% include "engine-license.md" %}} + ### Install using the rpm repository {#install-using-the-repository} Before you install Docker Engine for the first time on a new host machine, you diff --git a/content/manuals/engine/install/sles.md b/content/manuals/engine/install/sles.md deleted file mode 100644 index d32163c93fa..00000000000 --- a/content/manuals/engine/install/sles.md +++ /dev/null @@ -1,267 +0,0 @@ ---- -description: Learn how to install Docker Engine on SLES. These instructions cover - the different installation methods, how to uninstall, and next steps. 
-keywords: requirements, apt, installation, install docker engine, centos, rpm, sles, install, uninstall, - upgrade, update, s390x, ibm-z -title: Install Docker Engine on SLES (s390x) -linkTitle: SLES (s390x) -weight: 70 -toc_max: 4 -aliases: -- /ee/docker-ee/sles/ -- /ee/docker-ee/suse/ -- /engine/installation/linux/docker-ce/sles/ -- /engine/installation/linux/docker-ee/sles/ -- /engine/installation/linux/docker-ee/suse/ -- /engine/installation/linux/sles/ -- /engine/installation/linux/SUSE/ -- /engine/installation/linux/suse/ -- /engine/installation/sles/ -- /engine/installation/SUSE/ -- /install/linux/docker-ce/sles/ -- /install/linux/docker-ee/sles/ -- /install/linux/docker-ee/suse/ -- /install/linux/sles/ -- /installation/sles/ -download-url-base: https://download.docker.com/linux/sles ---- - -> [!NOTE] -> -> The installation instructions on this page refer to packages for SLES on the -> **s390x** architecture (IBM Z). Other architectures, including x86_64, aren't -> supported for SLES. - -To get started with Docker Engine on SLES, make sure you -[meet the prerequisites](#prerequisites), and then follow the -[installation steps](#installation-methods). - -## Prerequisites - -### OS requirements - -To install Docker Engine, you need a maintained version of one of the following -SLES versions: - -- SLES 15-SP4 on s390x (IBM Z) -- SLES 15-SP5 on s390x (IBM Z) - -You must enable the [`SCC SUSE`](https://scc.suse.com/packages?name=SUSE%20Linux%20Enterprise%20Server&version=15.5&arch=s390x) -repositories. - -You must add the [OpenSUSE `SELinux` repository](https://download.opensuse.org/repositories/security:/SELinux/). This repository is not added by default. Run the following commands to add it: - -```console -$ opensuse_repo="https://download.opensuse.org/repositories/security:/SELinux/openSUSE_Factory/security:SELinux.repo" -$ sudo zypper addrepo $opensuse_repo -``` - -### Uninstall old versions - -Before you can install Docker Engine, you need to uninstall any conflicting packages. - -Your Linux distribution may provide unofficial Docker packages, which may conflict -with the official packages provided by Docker. You must uninstall these packages -before you install the official version of Docker Engine. - -```console -$ sudo zypper remove docker \ - docker-client \ - docker-client-latest \ - docker-common \ - docker-latest \ - docker-latest-logrotate \ - docker-logrotate \ - docker-engine \ - runc -``` - -`zypper` might report that you have none of these packages installed. - -Images, containers, volumes, and networks stored in `/var/lib/docker/` aren't -automatically removed when you uninstall Docker. - -## Installation methods - -You can install Docker Engine in different ways, depending on your needs: - -- You can - [set up Docker's repositories](#install-using-the-repository) and install - from them, for ease of installation and upgrade tasks. This is the - recommended approach. - -- You can download the RPM package, - [install it manually](#install-from-a-package), and manage - upgrades completely manually. This is useful in situations such as installing - Docker on air-gapped systems with no access to the internet. - -- In testing and development environments, you can use automated - [convenience scripts](#install-using-the-convenience-script) to install Docker. - -### Install using the rpm repository {#install-using-the-repository} - -Before you install Docker Engine for the first time on a new host machine, you -need to set up the Docker repository. 
Afterward, you can install and update -Docker from the repository. - -#### Set up the repository - -Set up the repository. - -```console -$ sudo zypper addrepo {{% param "download-url-base" %}}/docker-ce.repo -``` - -#### Install Docker Engine - -1. Install the Docker packages. - - {{< tabs >}} - {{< tab name="Latest" >}} - - To install the latest version, run: - - ```console - $ sudo zypper install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin - ``` - - If prompted to accept the GPG key, verify that the fingerprint matches - `060A 61C5 1B55 8A7F 742B 77AA C52F EB6B 621E 9F35`, and if so, accept it. - - This command installs Docker, but it doesn't start Docker. It also creates a - `docker` group, however, it doesn't add any users to the group by default. - - {{< /tab >}} - {{< tab name="Specific version" >}} - - To install a specific version, start by listing the available versions in - the repository: - - ```console - $ sudo zypper search -s --match-exact docker-ce | sort -r - - v | docker-ce | package | 3:{{% param "docker_ce_version" %}}-1 | s390x | Docker CE Stable - s390x - v | docker-ce | package | 3:{{% param "docker_ce_version_prev" %}}-1 | s390x | Docker CE Stable - s390x - ``` - - The list returned depends on which repositories are enabled, and is specific - to your version of SLES. - - Install a specific version by its fully qualified package name, which is - the package name (`docker-ce`) plus the version string (2nd column), - separated by a hyphen (`-`). For example, `docker-ce-3:{{% param "docker_ce_version" %}}`. - - Replace `` with the desired version and then run the following - command to install: - - ```console - $ sudo zypper install docker-ce- docker-ce-cli- containerd.io docker-buildx-plugin docker-compose-plugin - ``` - - This command installs Docker, but it doesn't start Docker. It also creates a - `docker` group, however, it doesn't add any users to the group by default. - - {{< /tab >}} - {{< /tabs >}} - -2. Start Docker Engine. - - ```console - $ sudo systemctl enable --now docker - ``` - - This configures the Docker systemd service to start automatically when you - boot your system. If you don't want Docker to start automatically, use `sudo - systemctl start docker` instead. - -3. Verify that the installation is successful by running the `hello-world` image: - - ```console - $ sudo docker run hello-world - ``` - - This command downloads a test image and runs it in a container. When the - container runs, it prints a confirmation message and exits. - -You have now successfully installed and started Docker Engine. - -{{% include "root-errors.md" %}} - -#### Upgrade Docker Engine - -To upgrade Docker Engine, follow the [installation instructions](#install-using-the-repository), -choosing the new version you want to install. - -### Install from a package - -If you can't use Docker's `rpm` repository to install Docker Engine, you can -download the `.rpm` file for your release and install it manually. You need to -download a new file each time you want to upgrade Docker Engine. - - -1. Go to [{{% param "download-url-base" %}}/]({{% param "download-url-base" %}}/) - and choose your version of SLES. Then browse to `s390x/stable/Packages/` - and download the `.rpm` file for the Docker version you want to install. - -2. Install Docker Engine, changing the following path to the path where you downloaded - the Docker package. - - ```console - $ sudo zypper install /path/to/package.rpm - ``` - - Docker is installed but not started. 
The `docker` group is created, but no - users are added to the group. - -3. Start Docker Engine. - - ```console - $ sudo systemctl enable --now docker - ``` - - This configures the Docker systemd service to start automatically when you - boot your system. If you don't want Docker to start automatically, use `sudo - systemctl start docker` instead. - -4. Verify that the installation is successful by running the `hello-world` image: - - ```console - $ sudo docker run hello-world - ``` - - This command downloads a test image and runs it in a container. When the - container runs, it prints a confirmation message and exits. - -You have now successfully installed and started Docker Engine. - -{{% include "root-errors.md" %}} - -#### Upgrade Docker Engine - -To upgrade Docker Engine, download the newer package files and repeat the -[installation procedure](#install-from-a-package), using `zypper -y upgrade` -instead of `zypper -y install`, and point to the new files. - -{{% include "install-script.md" %}} - -## Uninstall Docker Engine - -1. Uninstall the Docker Engine, CLI, containerd, and Docker Compose packages: - - ```console - $ sudo zypper remove docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras - ``` - -2. Images, containers, volumes, or custom configuration files on your host - aren't automatically removed. To delete all images, containers, and volumes: - - ```console - $ sudo rm -rf /var/lib/docker - $ sudo rm -rf /var/lib/containerd - ``` - -You have to delete any edited configuration files manually. - -## Next steps - -- Continue to [Post-installation steps for Linux](linux-postinstall.md). diff --git a/content/manuals/engine/install/ubuntu.md b/content/manuals/engine/install/ubuntu.md index 2d1b920d9b6..38d23463673 100644 --- a/content/manuals/engine/install/ubuntu.md +++ b/content/manuals/engine/install/ubuntu.md @@ -51,16 +51,16 @@ To get started with Docker Engine on Ubuntu, make sure you To install Docker Engine, you need the 64-bit version of one of these Ubuntu versions: -- Ubuntu Oracular 24.10 +- Ubuntu Resolute 26.04 (LTS) +- Ubuntu Questing 25.10 - Ubuntu Noble 24.04 (LTS) - Ubuntu Jammy 22.04 (LTS) -- Ubuntu Focal 20.04 (LTS) Docker Engine for Ubuntu is compatible with x86_64 (or amd64), armhf, arm64, s390x, and ppc64le (ppc64el) architectures. > [!NOTE] -> +> > Installation on Ubuntu derivative distributions, such as Linux Mint, is not officially > supported (though it may work). @@ -88,10 +88,10 @@ conflicts with the versions bundled with Docker Engine. Run the following command to uninstall all conflicting packages: ```console -$ for pkg in docker.io docker-doc docker-compose docker-compose-v2 podman-docker containerd runc; do sudo apt-get remove $pkg; done +$ sudo apt remove $(dpkg --get-selections docker.io docker-compose docker-compose-v2 docker-doc podman-docker containerd runc | cut -f1) ``` -`apt-get` might report that you have none of these packages installed. +`apt` might report that you have none of these packages installed. Images, containers, volumes, and networks stored in `/var/lib/docker/` aren't automatically removed when you uninstall Docker. If you want to start with a @@ -114,6 +114,8 @@ You can install Docker Engine in different ways, depending on your needs: - Use a [convenience script](#install-using-the-convenience-script). Only recommended for testing and development environments. 
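For the last option, the convenience script is downloaded from https://get.docker.com and run directly. This is only a quick illustration; the script's behavior and caveats are covered in the convenience script section linked above:

```console
$ curl -fsSL https://get.docker.com -o get-docker.sh
$ sudo sh get-docker.sh
```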
+{{% include "engine-license.md" %}} + ### Install using the `apt` repository {#install-using-the-repository} Before you install Docker Engine for the first time on a new host machine, you @@ -124,18 +126,23 @@ Docker from the repository. ```bash # Add Docker's official GPG key: - sudo apt-get update - sudo apt-get install ca-certificates curl + sudo apt update + sudo apt install ca-certificates curl sudo install -m 0755 -d /etc/apt/keyrings sudo curl -fsSL {{% param "download-url-base" %}}/gpg -o /etc/apt/keyrings/docker.asc sudo chmod a+r /etc/apt/keyrings/docker.asc # Add the repository to Apt sources: - echo \ - "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] {{% param "download-url-base" %}} \ - $(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \ - sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - sudo apt-get update + sudo tee /etc/apt/sources.list.d/docker.sources <}} {{< tab name="Specific version" >}} @@ -156,11 +163,10 @@ Docker from the repository. available versions in the repository: ```console - # List the available versions: - $ apt-cache madison docker-ce | awk '{ print $3 }' + $ apt list --all-versions docker-ce - 5:{{% param "docker_ce_version" %}}-1~ubuntu.24.04~noble - 5:{{% param "docker_ce_version_prev" %}}-1~ubuntu.24.04~noble + docker-ce/noble 5:{{% param "docker_ce_version" %}}-1~ubuntu.24.04~noble + docker-ce/noble 5:{{% param "docker_ce_version_prev" %}}-1~ubuntu.24.04~noble ... ``` @@ -168,12 +174,26 @@ Docker from the repository. ```console $ VERSION_STRING=5:{{% param "docker_ce_version" %}}-1~ubuntu.24.04~noble - $ sudo apt-get install docker-ce=$VERSION_STRING docker-ce-cli=$VERSION_STRING containerd.io docker-buildx-plugin docker-compose-plugin + $ sudo apt install docker-ce=$VERSION_STRING docker-ce-cli=$VERSION_STRING containerd.io docker-buildx-plugin docker-compose-plugin ``` {{< /tab >}} {{< /tabs >}} + > [!NOTE] + > + > After installation, verify that Docker is running: + > + > ```console + > $ sudo systemctl status docker + > ``` + > + > If Docker is not running, start it manually: + > + > ```console + > $ sudo systemctl start docker + > ``` + 3. Verify that the installation is successful by running the `hello-world` image: ```console @@ -227,12 +247,23 @@ download a new file each time you want to upgrade Docker Engine. ./docker-compose-plugin__.deb ``` - The Docker daemon starts automatically. + > [!NOTE] + > + > After installation, verify that Docker is running: + > + > ```console + > $ sudo systemctl status docker + > ``` + > + > If Docker is not running, start it manually: + > + > ```console + > $ sudo systemctl start docker + > ``` 6. Verify that the installation is successful by running the `hello-world` image: ```console - $ sudo service docker start $ sudo docker run hello-world ``` @@ -255,7 +286,7 @@ To upgrade Docker Engine, download the newer package files and repeat the 1. Uninstall the Docker Engine, CLI, containerd, and Docker Compose packages: ```console - $ sudo apt-get purge docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras + $ sudo apt purge docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras ``` 2. Images, containers, volumes, or custom configuration files on your host @@ -269,7 +300,7 @@ To upgrade Docker Engine, download the newer package files and repeat the 3. 
Remove source list and keyrings ```console - $ sudo rm /etc/apt/sources.list.d/docker.list + $ sudo rm /etc/apt/sources.list.d/docker.sources $ sudo rm /etc/apt/keyrings/docker.asc ``` diff --git a/content/manuals/engine/logging/drivers/awslogs.md b/content/manuals/engine/logging/drivers/awslogs.md index 189b892f5fe..46fa7399276 100644 --- a/content/manuals/engine/logging/drivers/awslogs.md +++ b/content/manuals/engine/logging/drivers/awslogs.md @@ -17,11 +17,12 @@ and Command Line Tools](https://docs.aws.amazon.com/cli/latest/reference/logs/in ## Usage To use the `awslogs` driver as the default logging driver, set the `log-driver` -and `log-opt` keys to appropriate values in the `daemon.json` file, which is -located in `/etc/docker/` on Linux hosts or -`C:\ProgramData\docker\config\daemon.json` on Windows Server. For more about -configuring Docker using `daemon.json`, see +and `log-opt` keys to appropriate values in the `daemon.json` file. For more +about configuring Docker using `daemon.json`, see [daemon.json](/reference/cli/dockerd.md#daemon-configuration-file). + +{{% include "daemon-cfg-desktop.md" %}} + The following example sets the log driver to `awslogs` and sets the `awslogs-region` option. @@ -225,12 +226,17 @@ The following `strftime` codes are supported: | `%p` | AM or PM. | AM | | `%M` | Minute as a zero-padded decimal number. | 57 | | `%S` | Second as a zero-padded decimal number. | 04 | -| `%L` | Milliseconds as a zero-padded decimal number. | .123 | | `%f` | Microseconds as a zero-padded decimal number. | 000345 | | `%z` | UTC offset in the form +HHMM or -HHMM. | +1300 | | `%Z` | Time zone name. | PST | | `%j` | Day of the year as a zero-padded decimal number. | 363 | +In addition, the following non-`strftime` codes are supported: + +| Code | Meaning | Example | +| :--- | :------------------------------------------------------------------- | :------- | +| `%L` | Milliseconds as a zero-padded decimal number preceded with a period. | .123 | + ### awslogs-multiline-pattern The `awslogs-multiline-pattern` option defines a multi-line start pattern using a diff --git a/content/manuals/engine/logging/drivers/etwlogs.md b/content/manuals/engine/logging/drivers/etwlogs.md index 34a579973e8..98f3960ade7 100644 --- a/content/manuals/engine/logging/drivers/etwlogs.md +++ b/content/manuals/engine/logging/drivers/etwlogs.md @@ -25,7 +25,7 @@ before the provider has been registered with the system. Here is an example of how to listen to these events using the logman utility program included in most installations of Windows: -1. `logman start -ets DockerContainerLogs -p {a3693192-9ed6-46d2-a981-f8226c8363bd} 0 0 -o trace.etl` +1. `logman start -ets DockerContainerLogs -p "{a3693192-9ed6-46d2-a981-f8226c8363bd}" 0x0 -o trace.etl` 2. Run your container(s) with the etwlogs driver, by adding `--log-driver=etwlogs` to the Docker run command, and generate log messages. 3. `logman stop -ets DockerContainerLogs` diff --git a/content/manuals/engine/logging/drivers/fluentd.md b/content/manuals/engine/logging/drivers/fluentd.md index 4f44382affc..4169bc8c04b 100644 --- a/content/manuals/engine/logging/drivers/fluentd.md +++ b/content/manuals/engine/logging/drivers/fluentd.md @@ -33,10 +33,11 @@ Some options are supported by specifying `--log-opt` as many times as needed: - `tag`: specify a tag for Fluentd messages. Supports some Go template markup, ex `{{.ID}}`, `{{.FullID}}` or `{{.Name}}` `docker.{{.ID}}`. 
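For example, a container can use the `fluentd` driver with a templated tag as in the following sketch; the address assumes a Fluentd daemon listening on the default `localhost:24224`, so adjust it for your environment:

```console
$ docker run --log-driver=fluentd \
    --log-opt fluentd-address=localhost:24224 \
    --log-opt tag="docker.{{.Name}}" \
    alpine echo "hello"
```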
To use the `fluentd` driver as the default logging driver, set the `log-driver` -and `log-opt` keys to appropriate values in the `daemon.json` file, which is -located in `/etc/docker/` on Linux hosts or -`C:\ProgramData\docker\config\daemon.json` on Windows Server. For more about -configuring Docker using `daemon.json`, see [daemon.json](/reference/cli/dockerd.md#daemon-configuration-file). +and `log-opt` keys to appropriate values in the `daemon.json` file. For more +about configuring Docker using `daemon.json`, see +[daemon.json](/reference/cli/dockerd.md#daemon-configuration-file). + +{{% include "daemon-cfg-desktop.md" %}} The following example sets the log driver to `fluentd` and sets the `fluentd-address` option. @@ -143,6 +144,11 @@ The maximum number of retries. Defaults to `4294967295` (2\*\*32 - 1). Generates event logs in nanosecond resolution. Defaults to `false`. +### fluentd-write-timeout + +Sets the timeout for the write call to the `fluentd` daemon. By default, +writes have no timeout and will block indefinitely. + ## Fluentd daemon management with Docker About `Fluentd` itself, see [the project webpage](https://www.fluentd.org) diff --git a/content/manuals/engine/logging/drivers/gcplogs.md b/content/manuals/engine/logging/drivers/gcplogs.md index cb574c3e4af..61994ce817f 100644 --- a/content/manuals/engine/logging/drivers/gcplogs.md +++ b/content/manuals/engine/logging/drivers/gcplogs.md @@ -14,12 +14,12 @@ Logging. ## Usage To use the `gcplogs` driver as the default logging driver, set the `log-driver` -and `log-opt` keys to appropriate values in the `daemon.json` file, which is -located in `/etc/docker/` on Linux hosts or -`C:\ProgramData\docker\config\daemon.json` on Windows Server. For more about -configuring Docker using `daemon.json`, see +and `log-opt` keys to appropriate values in the `daemon.json` file. For more +about configuring Docker using `daemon.json`, see [daemon.json](/reference/cli/dockerd.md#daemon-configuration-file). +{{% include "daemon-cfg-desktop.md" %}} + The following example sets the log driver to `gcplogs` and sets the `gcp-meta-name` option. diff --git a/content/manuals/engine/logging/drivers/gelf.md b/content/manuals/engine/logging/drivers/gelf.md index 5416bdc9463..1baeb98d974 100644 --- a/content/manuals/engine/logging/drivers/gelf.md +++ b/content/manuals/engine/logging/drivers/gelf.md @@ -23,11 +23,12 @@ In GELF, every log message is a dict with the following fields: ## Usage To use the `gelf` driver as the default logging driver, set the `log-driver` and -`log-opt` keys to appropriate values in the `daemon.json` file, which is located -in `/etc/docker/` on Linux hosts or `C:\ProgramData\docker\config\daemon.json` -on Windows Server. For more about configuring Docker using `daemon.json`, see +`log-opt` keys to appropriate values in the `daemon.json` file. For more about +configuring Docker using `daemon.json`, see [daemon.json](/reference/cli/dockerd.md#daemon-configuration-file). +{{% include "daemon-cfg-desktop.md" %}} + The following example sets the log driver to `gelf` and sets the `gelf-address` option. 
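A minimal sketch of such a `daemon.json`, with a placeholder GELF endpoint address:

```json
{
  "log-driver": "gelf",
  "log-opts": {
    "gelf-address": "udp://1.2.3.4:12201"
  }
}
```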
diff --git a/content/manuals/engine/logging/drivers/journald.md b/content/manuals/engine/logging/drivers/journald.md index 8bd75043a56..071d736787a 100644 --- a/content/manuals/engine/logging/drivers/journald.md +++ b/content/manuals/engine/logging/drivers/journald.md @@ -28,12 +28,12 @@ stores the following metadata in the journal with each message: ## Usage To use the `journald` driver as the default logging driver, set the `log-driver` -and `log-opts` keys to appropriate values in the `daemon.json` file, which is -located in `/etc/docker/` on Linux hosts or -`C:\ProgramData\docker\config\daemon.json` on Windows Server. For more about -configuring Docker using `daemon.json`, see +and `log-opts` keys to appropriate values in the `daemon.json` file. For more +about configuring Docker using `daemon.json`, see [daemon.json](/reference/cli/dockerd.md#daemon-configuration-file). +{{% include "daemon-cfg-desktop.md" %}} + The following example sets the log driver to `journald`: ```json diff --git a/content/manuals/engine/logging/drivers/json-file.md b/content/manuals/engine/logging/drivers/json-file.md index b275f618866..2d2ce9ec86b 100644 --- a/content/manuals/engine/logging/drivers/json-file.md +++ b/content/manuals/engine/logging/drivers/json-file.md @@ -31,12 +31,12 @@ only one container. ## Usage To use the `json-file` driver as the default logging driver, set the `log-driver` -and `log-opts` keys to appropriate values in the `daemon.json` file, which is -located in `/etc/docker/` on Linux hosts or -`C:\ProgramData\docker\config\` on Windows Server. If the file does not exist, create it first. For more information about -configuring Docker using `daemon.json`, see +and `log-opts` keys to appropriate values in the `daemon.json` file. For more +information about configuring Docker using `daemon.json`, see [daemon.json](/reference/cli/dockerd.md#daemon-configuration-file). +{{% include "daemon-cfg-desktop.md" %}} + The following example sets the log driver to `json-file` and sets the `max-size` and `max-file` options to enable automatic log-rotation. @@ -80,7 +80,7 @@ The `json-file` logging driver supports the following logging options: | `labels-regex` | Similar to and compatible with `labels`. A regular expression to match logging-related labels. Used for advanced [log tag options](log_tags.md). | `--log-opt labels-regex=^(production_status\|geo)` | | `env` | Applies when starting the Docker daemon. A comma-separated list of logging-related environment variables this daemon accepts. Used for advanced [log tag options](log_tags.md). | `--log-opt env=os,customer` | | `env-regex` | Similar to and compatible with `env`. A regular expression to match logging-related environment variables. Used for advanced [log tag options](log_tags.md). | `--log-opt env-regex=^(os\|customer)` | -| `compress` | Toggles compression for rotated logs. Default is `disabled`. | `--log-opt compress=true` | +| `compress` | Toggles compression for rotated logs. Defaults to `false` (no compression). 
| `--log-opt compress=true` | ### Examples diff --git a/content/manuals/engine/logging/drivers/local.md b/content/manuals/engine/logging/drivers/local.md index ba3292d4a86..d5cd56fc664 100644 --- a/content/manuals/engine/logging/drivers/local.md +++ b/content/manuals/engine/logging/drivers/local.md @@ -26,12 +26,12 @@ for each file and a default count of 5 for the number of such files (to account ## Usage To use the `local` driver as the default logging driver, set the `log-driver` -and `log-opt` keys to appropriate values in the `daemon.json` file, which is -located in `/etc/docker/` on Linux hosts or -`C:\ProgramData\docker\config\daemon.json` on Windows Server. For more about -configuring Docker using `daemon.json`, see +and `log-opt` keys to appropriate values in the `daemon.json` file. For more +about configuring Docker using `daemon.json`, see [daemon.json](/reference/cli/dockerd.md#daemon-configuration-file). +{{% include "daemon-cfg-desktop.md" %}} + The following example sets the log driver to `local` and sets the `max-size` option. diff --git a/content/manuals/engine/logging/drivers/splunk.md b/content/manuals/engine/logging/drivers/splunk.md index 97373d1198a..5a4e99e211a 100644 --- a/content/manuals/engine/logging/drivers/splunk.md +++ b/content/manuals/engine/logging/drivers/splunk.md @@ -32,11 +32,11 @@ configuration file and restart Docker. For example: } ``` -The daemon.json file is located in `/etc/docker/` on Linux hosts or -`C:\ProgramData\docker\config\daemon.json` on Windows Server. For more about -configuring Docker using `daemon.json`, see +For more about configuring Docker using `daemon.json`, see [daemon.json](/reference/cli/dockerd.md#daemon-configuration-file). +{{% include "daemon-cfg-desktop.md" %}} + > [!NOTE] > > `log-opts` configuration options in the `daemon.json` configuration file must diff --git a/content/manuals/engine/logging/drivers/syslog.md b/content/manuals/engine/logging/drivers/syslog.md index 2cabe82bcda..e27547b4dfa 100644 --- a/content/manuals/engine/logging/drivers/syslog.md +++ b/content/manuals/engine/logging/drivers/syslog.md @@ -22,7 +22,7 @@ receiver can extract the following information: The format is defined in [RFC 5424](https://tools.ietf.org/html/rfc5424) and Docker's syslog driver implements the [ABNF reference](https://tools.ietf.org/html/rfc5424#section-6) in the following way: -```none +```text TIMESTAMP SP HOSTNAME SP APP-NAME SP PROCID SP MSGID + + + | + | | | | | @@ -35,12 +35,12 @@ The format is defined in [RFC 5424](https://tools.ietf.org/html/rfc5424) and Doc ## Usage To use the `syslog` driver as the default logging driver, set the `log-driver` -and `log-opt` keys to appropriate values in the `daemon.json` file, which is -located in `/etc/docker/` on Linux hosts or -`C:\ProgramData\docker\config\daemon.json` on Windows Server. For more about -configuring Docker using `daemon.json`, see +and `log-opt` keys to appropriate values in the `daemon.json` file. For more +about configuring Docker using `daemon.json`, see [daemon.json](/reference/cli/dockerd.md#daemon-configuration-file). +{{% include "daemon-cfg-desktop.md" %}} + The following example sets the log driver to `syslog` and sets the `syslog-address` option. The `syslog-address` options supports both UDP and TCP; this example uses UDP. 
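The same options can also be set for an individual container at run time, which is a quick way to test the configuration; the receiver address in this sketch is a placeholder:

```console
$ docker run --log-driver syslog \
    --log-opt syslog-address=udp://1.2.3.4:514 \
    alpine echo "hello"
```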
diff --git a/content/manuals/engine/logging/log_tags.md b/content/manuals/engine/logging/log_tags.md index d0372fe5c25..d9493abc2db 100644 --- a/content/manuals/engine/logging/log_tags.md +++ b/content/manuals/engine/logging/log_tags.md @@ -30,7 +30,7 @@ Docker supports some special template markup you can use when specifying a tag's For example, specifying a `--log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}"` value yields `syslog` log lines like: -```none +```text Aug 7 18:33:19 HOSTNAME hello-world/foobar/5790672ab6a0[9103]: Hello from Docker. ``` diff --git a/content/manuals/engine/manage-resources/labels.md b/content/manuals/engine/manage-resources/labels.md index 1cc915fa2e4..87aa01f2c93 100644 --- a/content/manuals/engine/manage-resources/labels.md +++ b/content/manuals/engine/manage-resources/labels.md @@ -70,47 +70,122 @@ you build this functionality into third-party tooling. ## Manage labels on objects Each type of object with support for labels has mechanisms for adding and -managing them and using them as they relate to that type of object. These links -provide a good place to start learning about how you can use labels in your -Docker deployments. +managing them and using them as they relate to that type of object. -Labels on images, containers, local daemons, volumes, and networks are static for -the lifetime of the object. To change these labels you must recreate the object. -Labels on Swarm nodes and services can be updated dynamically. +Labels on images, containers, local daemons, volumes, and networks are static +for the lifetime of the object. To change these labels you must recreate the +object. Labels on Swarm nodes and services can be updated dynamically. -- Images and containers +### Images - - [Adding labels to images](/reference/dockerfile.md#label) - - [Overriding a container's labels at runtime](/reference/cli/docker/container/run.md#label) - - [Inspecting labels on images or containers](/reference/cli/docker/inspect.md) - - [Filtering images by label](/reference/cli/docker/image/ls.md#filter) - - [Filtering containers by label](/reference/cli/docker/container/ls.md#filter) +Add labels to images using the [`LABEL` instruction](/reference/dockerfile.md#label) in a Dockerfile: -- Local Docker daemons +```dockerfile +LABEL com.example.version="1.0" +LABEL com.example.description="Web application" +``` - - [Adding labels to a Docker daemon at runtime](/reference/cli/dockerd.md) - - [Inspecting a Docker daemon's labels](/reference/cli/docker/system/info.md) +You can also set labels at build time with the `--label` flag, without needing +a `LABEL` instruction in the Dockerfile: -- Volumes +```console +$ docker build --label "com.example.version=1.0" -t myapp . 
+``` - - [Adding labels to volumes](/reference/cli/docker/volume/create.md) - - [Inspecting a volume's labels](/reference/cli/docker/volume/inspect.md) - - [Filtering volumes by label](/reference/cli/docker/volume/ls.md#filter) +Inspect labels on an image using `docker inspect`: -- Networks +```console +$ docker inspect --format='{{json .Config.Labels}}' myapp +``` - - [Adding labels to a network](/reference/cli/docker/network/create.md) - - [Inspecting a network's labels](/reference/cli/docker/network/inspect.md) - - [Filtering networks by label](/reference/cli/docker/network/ls.md#filter) +Filter images by label with [`docker image ls --filter`](/reference/cli/docker/image/ls/#filter): -- Swarm nodes +```console +$ docker image ls --filter "label=com.example.version" +``` - - [Adding or updating a Swarm node's labels](/reference/cli/docker/node/update.md#label-add) - - [Inspecting a Swarm node's labels](/reference/cli/docker/node/inspect.md) - - [Filtering Swarm nodes by label](/reference/cli/docker/node/ls.md#filter) +### Containers -- Swarm services - - [Adding labels when creating a Swarm service](/reference/cli/docker/service/create.md#label) - - [Updating a Swarm service's labels](/reference/cli/docker/service/update.md) - - [Inspecting a Swarm service's labels](/reference/cli/docker/service/inspect.md) - - [Filtering Swarm services by label](/reference/cli/docker/service/ls.md#filter) +Override or add labels when starting a container with +[`docker run --label`](/reference/cli/docker/container/run/#label): + +```console +$ docker run --label "com.example.env=prod" myapp +``` + +Inspect labels on a container: + +```console +$ docker inspect --format='{{json .Config.Labels}}' mycontainer +``` + +Filter containers by label with [`docker container ls --filter`](/reference/cli/docker/container/ls/#filter): + +```console +$ docker container ls --filter "label=com.example.env=prod" +``` + +### Local Docker daemons + +Add labels to the Docker daemon by passing `--label` flags when starting +`dockerd`, or by setting `"labels"` in the +[daemon configuration file](/reference/cli/dockerd.md#daemon-configuration-file): + +```json +{ + "labels": ["com.example.environment=production"] +} +``` + +View daemon labels with `docker system info`. 
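For example, to print only the daemon's labels (assuming the configuration above):

```console
$ docker system info --format '{{json .Labels}}'
```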
+ +### Volumes + +Add labels when [creating a volume](/reference/cli/docker/volume/create/): + +```console +$ docker volume create --label "com.example.purpose=database" myvolume +``` + +Inspect volume labels: + +```console +$ docker volume inspect myvolume --format='{{json .Labels}}' +``` + +Filter volumes by label with [`docker volume ls --filter`](/reference/cli/docker/volume/ls/#filter): + +```console +$ docker volume ls --filter "label=com.example.purpose" +``` + +### Networks + +Add labels when [creating a network](/reference/cli/docker/network/create/): + +```console +$ docker network create --label "com.example.purpose=frontend" mynetwork +``` + +Inspect network labels: + +```console +$ docker network inspect mynetwork --format='{{json .Labels}}' +``` + +Filter networks by label with [`docker network ls --filter`](/reference/cli/docker/network/ls/#filter): + +```console +$ docker network ls --filter "label=com.example.purpose" +``` + +### Swarm nodes + +- [Adding or updating a Swarm node's labels](/reference/cli/docker/node/update/#label-add) +- [Filtering Swarm nodes by label](/reference/cli/docker/node/ls/#filter) + +### Swarm services + +- [Adding labels when creating a Swarm service](/reference/cli/docker/service/create/#label) +- [Updating a Swarm service's labels](/reference/cli/docker/service/update/) +- [Filtering Swarm services by label](/reference/cli/docker/service/ls/#filter) diff --git a/content/manuals/engine/manage-resources/pruning.md b/content/manuals/engine/manage-resources/pruning.md index 56b5b4de958..9c6861d1bec 100644 --- a/content/manuals/engine/manage-resources/pruning.md +++ b/content/manuals/engine/manage-resources/pruning.md @@ -1,11 +1,12 @@ --- description: Free up disk space by removing unused resources with the prune command -keywords: pruning, prune, images, volumes, containers, networks, disk, administration, +keywords: + pruning, prune, images, volumes, containers, networks, disk, administration, garbage collection title: Prune unused Docker objects aliases: -- /engine/admin/pruning/ -- /config/pruning/ + - /engine/admin/pruning/ + - /config/pruning/ --- Docker takes a conservative approach to cleaning up unused objects (often @@ -52,7 +53,7 @@ $ docker image prune -a --filter "until=24h" ``` Other filtering expressions are available. See the -[`docker image prune` reference](/reference/cli/docker/image/prune.md) +[`docker image prune` reference](/reference/cli/docker/image/prune/) for more examples. ## Prune containers @@ -83,7 +84,7 @@ $ docker container prune --filter "until=24h" ``` Other filtering expressions are available. See the -[`docker container prune` reference](/reference/cli/docker/container/prune.md) +[`docker container prune` reference](/reference/cli/docker/container/prune/) for more examples. ## Prune volumes @@ -111,7 +112,7 @@ $ docker volume prune --filter "label!=keep" ``` Other filtering expressions are available. See the -[`docker volume prune` reference](/reference/cli/docker/volume/prune.md) +[`docker volume prune` reference](/reference/cli/docker/volume/prune/) for more examples. ## Prune networks @@ -140,9 +141,28 @@ $ docker network prune --filter "until=24h" ``` Other filtering expressions are available. See the -[`docker network prune` reference](/reference/cli/docker/network/prune.md) +[`docker network prune` reference](/reference/cli/docker/network/prune/) for more examples. +## Prune build cache + +`docker buildx prune` removes the build cache for the currently selected +builder. 
If you use multiple builders, each builder maintains its own cache — +use the `--builder` flag to target a specific builder instance. + +```console +$ docker buildx prune + +WARNING! This will remove all dangling build cache. +Are you sure you want to continue? [y/N] y +``` + +By default, you're prompted to continue. To bypass the prompt, use the `-f` or +`--force` flag. + +See the [`docker buildx prune` reference](/reference/cli/docker/buildx/prune/) +for all options, including `--all` to also remove internal and frontend images. + ## Prune everything The `docker system prune` command is a shortcut that prunes images, containers, @@ -188,5 +208,5 @@ $ docker system prune --filter "until=24h" ``` Other filtering expressions are available. See the -[`docker system prune` reference](/reference/cli/docker/system/prune.md) +[`docker system prune` reference](/reference/cli/docker/system/prune/) for more examples. diff --git a/content/manuals/engine/network/_index.md b/content/manuals/engine/network/_index.md index 97583dfe8af..35d6e11787d 100644 --- a/content/manuals/engine/network/_index.md +++ b/content/manuals/engine/network/_index.md @@ -5,68 +5,99 @@ weight: 30 description: Learn how networking works from the container's point of view keywords: networking, container, standalone, IP address, DNS resolution aliases: -- /articles/networking/ -- /config/containers/container-networking/ -- /engine/tutorials/networkingcontainers/ -- /engine/userguide/networking/ -- /engine/userguide/networking/configure-dns/ -- /engine/userguide/networking/default_network/binding/ -- /engine/userguide/networking/default_network/configure-dns/ -- /engine/userguide/networking/default_network/container-communication/ -- /engine/userguide/networking/dockernetworks/ -- /network/ + - /articles/networking/ + - /config/containers/container-networking/ + - /engine/tutorials/networkingcontainers/ + - /engine/userguide/networking/ + - /engine/userguide/networking/configure-dns/ + - /engine/userguide/networking/default_network/binding/ + - /engine/userguide/networking/default_network/configure-dns/ + - /engine/userguide/networking/default_network/container-communication/ + - /engine/userguide/networking/dockernetworks/ + - /network/ --- Container networking refers to the ability for containers to connect to and -communicate with each other, or to non-Docker workloads. +communicate with each other, and with non-Docker network services. Containers have networking enabled by default, and they can make outgoing connections. A container has no information about what kind of network it's -attached to, or whether their peers are also Docker workloads or not. A +attached to, or whether its network peers are also Docker containers. A container only sees a network interface with an IP address, a gateway, a -routing table, DNS services, and other networking details. That is, unless the -container uses the `none` network driver. +routing table, DNS services, and other networking details. This page describes networking from the point of view of the container, and the concepts around container networking. -This page doesn't describe OS-specific details about how Docker networks work. -For information about how Docker manipulates `iptables` rules on Linux, -see [Packet filtering and firewalls](packet-filtering-firewalls.md). + +When Docker Engine on Linux starts for the first time, it has a single +built-in network called the "default bridge" network. 
When you run a +container without the `--network` option, it is connected to the default +bridge. + +Containers attached to the default bridge have access to network services +outside the Docker host. They use "masquerading" which means, if the +Docker host has Internet access, no additional configuration is needed +for the container to have Internet access. + +For example, to run a container on the default bridge network, and have +it ping an Internet host: + +```console +$ docker run --rm -ti busybox ping -c1 docker.com +PING docker.com (23.185.0.4): 56 data bytes +64 bytes from 23.185.0.4: seq=0 ttl=62 time=6.564 ms + +--- docker.com ping statistics --- +1 packets transmitted, 1 packets received, 0% packet loss +round-trip min/avg/max = 6.564/6.564/6.564 ms +``` ## User-defined networks -You can create custom, user-defined networks, and connect multiple containers -to the same network. Once connected to a user-defined network, containers can -communicate with each other using container IP addresses or container names. +With the default configuration, containers attached to the default +bridge network have unrestricted network access to each other using +container IP addresses. They cannot refer to each other by name. + +It can be useful to separate groups of containers that should have full +access to each other, but restricted access to containers in other groups. + +You can create custom, user-defined networks, and connect groups of containers +to the same network. Once connected to a user-defined network, containers +can communicate with each other using container IP addresses or container names. The following example creates a network using the `bridge` network driver and -running a container in the created network: +runs a container in that network: ```console $ docker network create -d bridge my-net -$ docker run --network=my-net -itd --name=container3 busybox +$ docker run --network=my-net -it busybox ``` ### Drivers -The following network drivers are available by default, and provide core -networking functionality: +Docker Engine has a number of network drivers, as well as the default "bridge". +On Linux, the following built-in network drivers are available: -| Driver | Description | -| :-------- | :----------------------------------------------------------------------- | -| `bridge` | The default network driver. | -| `host` | Remove network isolation between the container and the Docker host. | -| `none` | Completely isolate a container from the host and other containers. | -| `overlay` | Overlay networks connect multiple Docker daemons together. | -| `ipvlan` | IPvlan networks provide full control over both IPv4 and IPv6 addressing. | -| `macvlan` | Assign a MAC address to a container. | +| Driver | Description | +| :------------------------------ | :------------------------------------------------------------------ | +| [bridge](./drivers/bridge.md) | The default network driver. | +| [host](./drivers/host.md) | Remove network isolation between the container and the Docker host. | +| [none](./drivers/none.md) | Completely isolate a container from the host and other containers. | +| [overlay](./drivers/overlay.md) | Swarm Overlay networks connect multiple Docker daemons together. | +| [ipvlan](./drivers/ipvlan.md) | Connect containers to external VLANs. | +| [macvlan](./drivers/macvlan.md) | Containers appear as devices on the host's network. | -For more information about the different drivers, see [Network drivers -overview](./drivers/_index.md). 
+More information can be found in the network driver specific pages, including +their configuration options and details about their functionality. + +Native Windows containers have a different set of drivers, see +[Windows container network drivers](https://learn.microsoft.com/en-us/virtualization/windowscontainers/container-networking/network-drivers-topologies). ### Connecting to multiple networks -A container can be connected to multiple networks. +Connecting a container to a network can be compared to connecting an Ethernet +cable to a physical host. Just as a host can be connected to multiple Ethernet +networks, a container can be connected to multiple Docker networks. For example, a frontend container may be connected to a bridge network with external access, and a @@ -78,6 +109,8 @@ A container may also be connected to different types of network. For example, an `ipvlan` network to provide internet access, and a `bridge` network for access to local services. +Containers can also share networking stacks, see [Container networks](#container-networks). + When sending packets, if the destination is an address in a directly connected network, packets are sent to that network. Otherwise, packets are sent to a default gateway for routing to their destination. In the example above, @@ -87,8 +120,8 @@ The default gateway is selected by Docker, and may change whenever a container's network connections change. To make Docker choose a specific default gateway when creating the container or connecting a new network, set a gateway priority. See option `gw-priority` -for the [`docker run`](/reference/cli/docker/container/run.md) and -[`docker network connect`](/reference/cli/docker/network/connect.md) commands. +for the [`docker run`](/reference/cli/docker/container/run/) and +[`docker network connect`](/reference/cli/docker/network/connect/) commands. The default `gw-priority` is `0` and the gateway in the network with the highest priority is the default gateway. So, when a network should always @@ -99,84 +132,20 @@ $ docker run --network name=gwnet,gw-priority=1 --network anet1 --name myctr myi $ docker network connect anet2 myctr ``` -## Container networks - -In addition to user-defined networks, you can attach a container to another -container's networking stack directly, using the `--network -container:` flag format. - -The following flags aren't supported for containers using the `container:` -networking mode: - -- `--add-host` -- `--hostname` -- `--dns` -- `--dns-search` -- `--dns-option` -- `--mac-address` -- `--publish` -- `--publish-all` -- `--expose` - -The following example runs a Redis container, with Redis binding to -`localhost`, then running the `redis-cli` command and connecting to the Redis -server over the `localhost` interface. - -```console -$ docker run -d --name redis example/redis --bind 127.0.0.1 -$ docker run --rm -it --network container:redis example/redis-cli -h 127.0.0.1 -``` - ## Published ports -By default, when you create or run a container using `docker create` or `docker run`, -containers on bridge networks don't expose any ports to the outside world. -Use the `--publish` or `-p` flag to make a port available to services -outside the bridge network. -This creates a firewall rule in the host, -mapping a container port to a port on the Docker host to the outside world. 
-Here are some examples: - -| Flag value | Description | -| ------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `-p 8080:80` | Map port `8080` on the Docker host to TCP port `80` in the container. | -| `-p 192.168.1.100:8080:80` | Map port `8080` on the Docker host IP `192.168.1.100` to TCP port `80` in the container. | -| `-p 8080:80/udp` | Map port `8080` on the Docker host to UDP port `80` in the container. | -| `-p 8080:80/tcp -p 8080:80/udp` | Map TCP port `8080` on the Docker host to TCP port `80` in the container, and map UDP port `8080` on the Docker host to UDP port `80` in the container. | - -> [!IMPORTANT] -> -> Publishing container ports is insecure by default. Meaning, when you publish -> a container's ports it becomes available not only to the Docker host, but to -> the outside world as well. -> -> If you include the localhost IP address (`127.0.0.1`, or `::1`) with the -> publish flag, only the Docker host and its containers can access the -> published container port. -> -> ```console -> $ docker run -p 127.0.0.1:8080:80 -p '[::1]:8080:80' nginx -> ``` -> -> > [!WARNING] -> > -> > In releases older than 28.0.0, hosts within the same L2 segment (for example, -> > hosts connected to the same network switch) can reach ports published to localhost. -> > For more information, see -> > [moby/moby#45610](https://github.com/moby/moby/issues/45610) - -If you want to make a container accessible to other containers, -it isn't necessary to publish the container's ports. -You can enable inter-container communication by connecting the containers to the -same network, usually a [bridge network](./drivers/bridge.md). - -Ports on the host's IPv6 addresses will map to the container's IPv4 address -if no host IP is given in a port mapping, the bridge network is IPv4-only, -and `--userland-proxy=true` (default). +When you create or run a container using `docker create` or `docker run`, all +ports of containers on bridge networks are accessible from the Docker host and +other containers connected to the same network. Ports are not accessible from +outside the host or, with the default configuration, from containers in other +networks. + +Use the `--publish` or `-p` flag to make a port available outside the host, +and to containers in other bridge networks. For more information about port mapping, including how to disable it and use direct routing to containers, see -[packet filtering and firewalls](./packet-filtering-firewalls.md). +[port publishing](./port-publishing.md). ## IP address and hostname @@ -203,6 +172,85 @@ You can override the hostname using `--hostname`. When connecting to an existing network using `docker network connect`, you can use the `--alias` flag to specify an additional network alias for the container on that network. +### Subnet allocation + +Docker networks can use either explicitly configured subnets or automatically allocated ones from default pools. + +#### Explicit subnet configuration + +You can specify exact subnets when creating a network: + +```console +$ docker network create --ipv6 --subnet 192.0.2.0/24 --subnet 2001:db8::/64 mynet +``` + +#### Automatic subnet allocation + +When no `--subnet` option is provided, Docker automatically selects a subnet from predefined "default address pools". +These pools can be configured in `/etc/docker/daemon.json`. 
Docker's built-in default is equivalent to: + +```json +{ + "default-address-pools": [ + { "base": "172.17.0.0/16", "size": 16 }, + { "base": "172.18.0.0/16", "size": 16 }, + { "base": "172.19.0.0/16", "size": 16 }, + { "base": "172.20.0.0/14", "size": 16 }, + { "base": "172.24.0.0/14", "size": 16 }, + { "base": "172.28.0.0/14", "size": 16 }, + { "base": "192.168.0.0/16", "size": 20 } + ] +} +``` + +- `base`: The subnet that can be allocated from. +- `size`: The prefix length used for each allocated subnet. + +When an IPv6 subnet is required and there are no IPv6 addresses in `default-address-pools`, Docker allocates +subnets from a Unique Local Address (ULA) prefix. To use specific IPv6 subnets instead, add them to your +`default-address-pools`. See [Dynamic IPv6 subnet allocation](../daemon/ipv6.md#dynamic-ipv6-subnet-allocation) +for more information. + +Docker attempts to avoid address prefixes already in use on the host. However, you may need to customize +`default-address-pools` to prevent routing conflicts in some network environments. + +The default pools use large subnets, which limits the number of networks you can create. You can divide base +subnets into smaller pools to support more networks. + +For example, this configuration allows Docker to create 256 networks from `172.17.0.0/16`. +Docker will allocate subnets `172.17.0.0/24`, `172.17.1.0/24`, and so on, up to `172.17.255.0/24`: + +```json +{ + "default-address-pools": [{ "base": "172.17.0.0/16", "size": 24 }] +} +``` + +You can also request a subnet with a specific prefix length from the default pools by using unspecified +addresses in the `--subnet` option: + +```console +$ docker network create --ipv6 --subnet ::/56 --subnet 0.0.0.0/24 mynet +6686a6746b17228f5052528113ddad0e6d68e2e3905d648e336b33409f2d3b64 +$ docker network inspect mynet -f '{{json .IPAM.Config}}' | jq . +[ + { + "Subnet": "172.19.0.0/24", + "Gateway": "172.19.0.1" + }, + { + "Subnet": "fdd3:6f80:972c::/56", + "Gateway": "fdd3:6f80:972c::1" + } +] +``` + +> [!NOTE] +> +> Support for unspecified addresses in `--subnet` was introduced in Docker 29.0.0. +> If Docker is downgraded to an older version, networks created in this way will become unusable. +> They can be removed and re-created, or will function again if the daemon is restored to 29.0.0 or later. + ## DNS services Containers use the same DNS servers as the host by default, but you can @@ -212,9 +260,12 @@ By default, containers inherit the DNS settings as defined in the `/etc/resolv.conf` configuration file. Containers that attach to the default `bridge` network receive a copy of this file. Containers that attach to a -[custom network](tutorials/standalone.md#use-user-defined-bridge-networks) +[custom network](drivers/bridge.md#use-user-defined-bridge-networks) use Docker's embedded DNS server. The embedded DNS server forwards external DNS lookups to the DNS servers configured on the host. +The embedded DNS server address is `127.0.0.11`. +There is no IPv6 equivalent; the IPv4 address works even in IPv6-only containers. +If an application requires an explicit DNS server address, use `127.0.0.11`. You can configure DNS resolution on a per-container basis, using flags for the `docker run` or `docker create` command used to start the container. @@ -222,7 +273,7 @@ The following table describes the available `docker run` flags related to DNS configuration. 
| Flag | Description | -| -------------- |-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `--dns` | The IP address of a DNS server. To specify multiple DNS servers, use multiple `--dns` flags. DNS requests will be forwarded from the container's network namespace so, for example, `--dns=127.0.0.1` refers to the container's own loopback address. | | `--dns-search` | A DNS search domain to search non-fully qualified hostnames. To specify multiple DNS search prefixes, use multiple `--dns-search` flags. | | `--dns-opt` | A key-value pair representing a DNS option and its value. See your operating system's documentation for `resolv.conf` for valid options. | @@ -234,10 +285,33 @@ Your container will have lines in `/etc/hosts` which define the hostname of the container itself, as well as `localhost` and a few other common things. Custom hosts, defined in `/etc/hosts` on the host machine, aren't inherited by containers. To pass additional hosts into a container, refer to [add entries to -container hosts file](/reference/cli/docker/container/run.md#add-host) in the +container hosts file](/reference/cli/docker/container/run/#add-host) in the `docker run` reference documentation. -## Proxy server +## Container networks + +In addition to user-defined networks, you can attach a container to another +container's networking stack directly, using the `--network +container:` flag format. -If your container needs to use a proxy server, see -[Use a proxy server](/manuals/engine/daemon/proxy.md). +The following flags aren't supported for containers using the `container:` +networking mode: + +- `--add-host` +- `--hostname` +- `--dns` +- `--dns-search` +- `--dns-option` +- `--mac-address` +- `--publish` +- `--publish-all` +- `--expose` + +The following example runs a Redis container, with Redis binding to +127.0.0.1, then running the `redis-cli` command and connecting to the Redis +server over 127.0.0.1. + +```console +$ docker run -d --name redis redis --bind 127.0.0.1 +$ docker run --rm -it --network container:redis redis redis-cli -h 127.0.0.1 +``` diff --git a/content/manuals/engine/network/drivers/_index.md b/content/manuals/engine/network/drivers/_index.md index 3e986899523..27b386ae19f 100644 --- a/content/manuals/engine/network/drivers/_index.md +++ b/content/manuals/engine/network/drivers/_index.md @@ -66,12 +66,7 @@ exist by default, and provide core networking functionality: - Third-party network plugins allow you to integrate Docker with specialized network stacks. 
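To check which network drivers and plugins your daemon currently exposes, one option is to query `docker info`; the exact list depends on your platform and on any plugins you have installed:

```console
$ docker info --format '{{json .Plugins.Network}}'
```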
-## Networking tutorials +## Next steps -Now that you understand the basics about Docker networks, deepen your -understanding using the following tutorials: - -- [Standalone networking tutorial](/manuals/engine/network/tutorials/standalone.md) -- [Host networking tutorial](/manuals/engine/network/tutorials/host.md) -- [Overlay networking tutorial](/manuals/engine/network/tutorials/overlay.md) -- [Macvlan networking tutorial](/manuals/engine/network/tutorials/macvlan.md) +Each driver page includes detailed explanations, configuration options, and +hands-on usage examples to help you work with that driver effectively. diff --git a/content/manuals/engine/network/drivers/bridge.md b/content/manuals/engine/network/drivers/bridge.md index 4f0b3268d36..17f3a6dea11 100644 --- a/content/manuals/engine/network/drivers/bridge.md +++ b/content/manuals/engine/network/drivers/bridge.md @@ -3,24 +3,35 @@ title: Bridge network driver description: All about using user-defined bridge networks and the default bridge keywords: network, bridge, user-defined, standalone aliases: -- /config/containers/bridges/ -- /engine/userguide/networking/default_network/build-bridges/ -- /engine/userguide/networking/default_network/custom-docker0/ -- /engine/userguide/networking/work-with-networks/ -- /network/bridge/ -- /network/drivers/bridge/ + - /config/containers/bridges/ + - /engine/userguide/networking/default_network/build-bridges/ + - /engine/userguide/networking/default_network/custom-docker0/ + - /engine/userguide/networking/work-with-networks/ + - /network/bridge/ + - /network/drivers/bridge/ + - /engine/network/tutorials/standalone/ --- -In terms of networking, a bridge network is a Link Layer device -which forwards traffic between network segments. A bridge can be a hardware -device or a software device running within a host machine's kernel. +A Docker bridge network has an IPv4 subnet and, optionally, an IPv6 subnet. +Each container connected to the bridge network has a network interface with +addresses in the network's subnets. By default, it: + +- Allows unrestricted network access to containers in the network from + the host, and from other containers connected to the same bridge network. +- Blocks access from containers in other networks and from outside the + Docker host. +- Uses masquerading to give containers external network access. Devices on + the host's external networks only see the IP address of the Docker host. +- Supports port publishing, where network traffic is forwarded between + container ports and ports on host IP addresses. The published ports + can be accessed from outside the Docker host, on its IP addresses. In terms of Docker, a bridge network uses a software bridge which lets containers connected to the same bridge network communicate, while providing -isolation from containers that aren't connected to that bridge network. The -Docker bridge driver automatically installs rules in the host machine so that -containers on different bridge networks can't communicate directly with each -other. +isolation from containers that aren't connected to that bridge network. By +default, the Docker bridge driver automatically installs rules in the host +machine so that containers connected to different bridge networks can only +communicate with each other using published ports. Bridge networks apply to containers running on the same Docker daemon host. 
For communication among containers running on different Docker daemon hosts, you @@ -84,7 +95,6 @@ network.** was to link them using the [`--link` flag](../links.md). This type of variable sharing isn't possible with user-defined networks. However, there are superior ways to share environment variables. A few ideas: - - Multiple containers can mount a file or directory containing the shared information, using a Docker volume. @@ -106,9 +116,10 @@ The following table describes the driver-specific options that you can pass to `--opt` when creating a custom network using the `bridge` driver. | Option | Default | Description | -|-------------------------------------------------------------------------------------------------|-----------------------------|-----------------------------------------------------------------------------------------------------| +| ----------------------------------------------------------------------------------------------- | --------------------------- | --------------------------------------------------------------------------------------------------- | | `com.docker.network.bridge.name` | | Interface name to use when creating the Linux bridge. | | `com.docker.network.bridge.enable_ip_masquerade` | `true` | Enable IP masquerading. | +| `com.docker.network.host_ipv4`
<br>`com.docker.network.host_ipv6` | | Address to use for source NAT. See [Packet filtering and firewalls](packet-filtering-firewalls.md). | +| `com.docker.network.bridge.gateway_mode_ipv4`<br>
`com.docker.network.bridge.gateway_mode_ipv6` | `nat` | Control external connectivity. See [Packet filtering and firewalls](packet-filtering-firewalls.md). | | `com.docker.network.bridge.enable_icc` | `true` | Enable or Disable inter-container connectivity. | | `com.docker.network.bridge.host_binding_ipv4` | all IPv4 and IPv6 addresses | Default IP when binding container ports. | @@ -167,7 +178,7 @@ $ docker network create my-net You can specify the subnet, the IP address range, the gateway, and other options. See the -[docker network create](/reference/cli/docker/network/create.md#specify-advanced-options) +[docker network create](/reference/cli/docker/network/create/#specify-advanced-options) reference or the output of `docker network create --help` for details. Use the `docker network rm` command to remove a user-defined bridge @@ -269,7 +280,7 @@ the settings you need to customize. "fixed-cidr": "192.168.1.0/25", "mtu": 1500, "default-gateway": "192.168.1.254", - "dns": ["10.20.1.2","10.20.1.3"] + "dns": ["10.20.1.2", "10.20.1.3"] } ``` @@ -279,7 +290,6 @@ In this example: - The bridge network's subnet is "192.168.1.0/24" (from `bip`). - Container addresses will be allocated from "192.168.1.0/25" (from `fixed-cidr`). - ### Use IPv6 with the default bridge network IPv6 can be enabled for the default bridge using the following options in @@ -343,9 +353,326 @@ or a device attached to it. This option can only be used with user-defined bridge networks. +## Usage examples + +This section provides hands-on examples for working with bridge networks. + +### Use the default bridge network + +This example shows how the default `bridge` network works. You start two +`alpine` containers on the default bridge and test how they communicate. + +> [!NOTE] +> The default `bridge` network is not recommended for production. Use +> user-defined bridge networks instead. + +1. List current networks: + + ```console + $ docker network ls + + NETWORK ID NAME DRIVER SCOPE + 17e324f45964 bridge bridge local + 6ed54d316334 host host local + 7092879f2cc8 none null local + ``` + + The default `bridge` network is listed, along with `host` and `none`. + +2. Start two `alpine` containers running `ash`. The `-dit` flags mean detached, + interactive, and with a TTY. Since you haven't specified a `--network` flag, + the containers connect to the default `bridge` network. + + ```console + $ docker run -dit --name alpine1 alpine ash + $ docker run -dit --name alpine2 alpine ash + ``` + + Verify both containers are running: + + ```console + $ docker container ls + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 602dbf1edc81 alpine "ash" 4 seconds ago Up 3 seconds alpine2 + da33b7aa74b0 alpine "ash" 17 seconds ago Up 16 seconds alpine1 + ``` + +3. Inspect the `bridge` network to see connected containers: + + ```console + $ docker network inspect bridge + ``` + + The output shows both containers connected, with their assigned IP addresses + (`172.17.0.2` for `alpine1` and `172.17.0.3` for `alpine2`). + +4. 
Connect to `alpine1`: + + ```console + $ docker attach alpine1 + + / # + ``` + + Show the network interfaces for `alpine1` from within the container: + + ```console + # ip addr show + + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN qlen 1 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 27: eth0@if28: mtu 1500 qdisc noqueue state UP + link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff + inet 172.17.0.2/16 scope global eth0 + valid_lft forever preferred_lft forever + ``` + + In this example, the `eth0` interface has the IP address `172.17.0.2`. + +5. From within `alpine1`, verify you can connect to the internet: + + ```console + # ping -c 2 google.com + + PING google.com (172.217.3.174): 56 data bytes + 64 bytes from 172.217.3.174: seq=0 ttl=41 time=9.841 ms + 64 bytes from 172.217.3.174: seq=1 ttl=41 time=9.897 ms + + --- google.com ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max = 9.841/9.869/9.897 ms + ``` + +6. Ping the second container by its IP address: + + ```console + # ping -c 2 172.17.0.3 + + PING 172.17.0.3 (172.17.0.3): 56 data bytes + 64 bytes from 172.17.0.3: seq=0 ttl=64 time=0.086 ms + 64 bytes from 172.17.0.3: seq=1 ttl=64 time=0.094 ms + + --- 172.17.0.3 ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max = 0.086/0.090/0.094 ms + ``` + + This succeeds. Now try pinging by container name: + + ```console + # ping -c 2 alpine2 + + ping: bad address 'alpine2' + ``` + + On the default bridge network, containers can't resolve each other by name. + +7. Detach from `alpine1` without stopping it using `CTRL+p CTRL+q`. + +8. Clean up: stop the containers and remove them. + + ```console + $ docker container stop alpine1 alpine2 + $ docker container rm alpine1 alpine2 + ``` + + Stopped containers lose their IP addresses. + +### Use user-defined bridge networks + +This example shows how user-defined bridge networks provide better isolation +and automatic DNS resolution between containers. + +1. Create the `alpine-net` network: + + ```console + $ docker network create --driver bridge alpine-net + ``` + +2. List Docker's networks: + + ```console + $ docker network ls + + NETWORK ID NAME DRIVER SCOPE + e9261a8c9a19 alpine-net bridge local + 17e324f45964 bridge bridge local + 6ed54d316334 host host local + 7092879f2cc8 none null local + ``` + + Inspect the `alpine-net` network: + + ```console + $ docker network inspect alpine-net + ``` + + This shows the network's gateway (for example, `172.18.0.1`) and that no + containers are connected yet. + +3. Create four containers. Three connect to `alpine-net`, and one connects to + the default `bridge`. 
Then connect one container to both networks: + + ```console + $ docker run -dit --name alpine1 --network alpine-net alpine ash + $ docker run -dit --name alpine2 --network alpine-net alpine ash + $ docker run -dit --name alpine3 alpine ash + $ docker run -dit --name alpine4 --network alpine-net alpine ash + $ docker network connect bridge alpine4 + ``` + + Verify all containers are running: + + ```console + $ docker container ls + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 156849ccd902 alpine "ash" 41 seconds ago Up 41 seconds alpine4 + fa1340b8d83e alpine "ash" 51 seconds ago Up 51 seconds alpine3 + a535d969081e alpine "ash" About a minute ago Up About a minute alpine2 + 0a02c449a6e9 alpine "ash" About a minute ago Up About a minute alpine1 + ``` + +4. Inspect both networks again to see which containers are connected: + + ```console + $ docker network inspect bridge + ``` + + Containers `alpine3` and `alpine4` are connected to the `bridge` network. + + ```console + $ docker network inspect alpine-net + ``` + + Containers `alpine1`, `alpine2`, and `alpine4` are connected to + `alpine-net`. + +5. On user-defined networks, containers can resolve each other by name. Connect + to `alpine1` and test: + + > [!NOTE] + > Automatic service discovery only resolves custom container names, not + > default automatically generated names. + + ```console + $ docker container attach alpine1 + + # ping -c 2 alpine2 + + PING alpine2 (172.18.0.3): 56 data bytes + 64 bytes from 172.18.0.3: seq=0 ttl=64 time=0.085 ms + 64 bytes from 172.18.0.3: seq=1 ttl=64 time=0.090 ms + + --- alpine2 ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max = 0.085/0.087/0.090 ms + + # ping -c 2 alpine4 + + PING alpine4 (172.18.0.4): 56 data bytes + 64 bytes from 172.18.0.4: seq=0 ttl=64 time=0.076 ms + 64 bytes from 172.18.0.4: seq=1 ttl=64 time=0.091 ms + + --- alpine4 ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max = 0.076/0.083/0.091 ms + ``` + +6. From `alpine1`, you can't connect to `alpine3` because it's on a different + network: + + ```console + # ping -c 2 alpine3 + + ping: bad address 'alpine3' + ``` + + You also can't connect by IP address. If `alpine3`'s IP is `172.17.0.2`: + + ```console + # ping -c 2 172.17.0.2 + + PING 172.17.0.2 (172.17.0.2): 56 data bytes + + --- 172.17.0.2 ping statistics --- + 2 packets transmitted, 0 packets received, 100% packet loss + ``` + + Detach from `alpine1` using `CTRL+p CTRL+q`. + +7. Since `alpine4` is connected to both networks, it can reach all containers. 
+ However, you need to use `alpine3`'s IP address: + + ```console + $ docker container attach alpine4 + + # ping -c 2 alpine1 + + PING alpine1 (172.18.0.2): 56 data bytes + 64 bytes from 172.18.0.2: seq=0 ttl=64 time=0.074 ms + 64 bytes from 172.18.0.2: seq=1 ttl=64 time=0.082 ms + + --- alpine1 ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max = 0.074/0.078/0.082 ms + + # ping -c 2 alpine2 + + PING alpine2 (172.18.0.3): 56 data bytes + 64 bytes from 172.18.0.3: seq=0 ttl=64 time=0.075 ms + 64 bytes from 172.18.0.3: seq=1 ttl=64 time=0.080 ms + + --- alpine2 ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max = 0.075/0.077/0.080 ms + + # ping -c 2 alpine3 + ping: bad address 'alpine3' + + # ping -c 2 172.17.0.2 + + PING 172.17.0.2 (172.17.0.2): 56 data bytes + 64 bytes from 172.17.0.2: seq=0 ttl=64 time=0.089 ms + 64 bytes from 172.17.0.2: seq=1 ttl=64 time=0.075 ms + + --- 172.17.0.2 ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max = 0.075/0.082/0.089 ms + ``` + +8. Verify all containers can connect to the internet: + + ```console + # ping -c 2 google.com + + PING google.com (172.217.3.174): 56 data bytes + 64 bytes from 172.217.3.174: seq=0 ttl=41 time=9.778 ms + 64 bytes from 172.217.3.174: seq=1 ttl=41 time=9.634 ms + + --- google.com ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max = 9.634/9.706/9.778 ms + ``` + + Detach with `CTRL+p CTRL+q` and repeat for `alpine3` and `alpine1` if + desired. + +9. Clean up: + + ```console + $ docker container stop alpine1 alpine2 alpine3 alpine4 + $ docker container rm alpine1 alpine2 alpine3 alpine4 + $ docker network rm alpine-net + ``` + ## Next steps -- Go through the [standalone networking tutorial](/manuals/engine/network/tutorials/standalone.md) - Learn about [networking from the container's point of view](../_index.md) - Learn about [overlay networks](./overlay.md) - Learn about [Macvlan networks](./macvlan.md) diff --git a/content/manuals/engine/network/drivers/host.md b/content/manuals/engine/network/drivers/host.md index 704fe5f095c..86c53e01113 100644 --- a/content/manuals/engine/network/drivers/host.md +++ b/content/manuals/engine/network/drivers/host.md @@ -3,8 +3,9 @@ title: Host network driver description: All about exposing containers on the Docker host's network keywords: network, host, standalone, host mode networking aliases: -- /network/host/ -- /network/drivers/host/ + - /network/host/ + - /network/drivers/host/ + - /engine/network/tutorials/host/ --- If you use the `host` network mode for a container, that container's network @@ -32,7 +33,17 @@ Host mode networking can be useful for the following use cases: This is because it doesn't require network address translation (NAT), and no "userland-proxy" is created for each port. -The host networking driver is supported on Docker Engine (Linux only) and Docker Desktop version 4.34 and later. +## Platform support + +The host networking driver is supported on: + +- Docker Engine on Linux +- Docker Desktop version 4.34 and later (requires enabling the feature in + Settings) + +> [!NOTE] +> For Docker Desktop users, see the [Docker Desktop section](#docker-desktop) +> below for setup instructions. You can also use a `host` network for a swarm service, by passing `--network host` to the `docker service create` command. 
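As a quick, illustrative sketch (the service name `web` and the `nginx` image here are placeholders, not part of the original example set), such a service might be created like this:

```console
$ # 'web' is an arbitrary name; any image that binds a port behaves the same way
$ docker service create --name web --network host nginx
```

Because the service tasks share the host's network stack, each task binds its ports directly on the node it runs on, so typically only one replica per node can bind a given port.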
In this case, control traffic (traffic @@ -93,19 +104,66 @@ $ nc localhost 80 ### Limitations - Processes inside the container cannot bind to the IP addresses of the host - because the container has no direct access to the interfaces of the host. + because the container has no direct access to the interfaces of the host. - The host network feature of Docker Desktop works on layer 4. This means that -unlike with Docker on Linux, network protocols that operate below TCP or UDP are -not supported. + unlike with Docker on Linux, network protocols that operate below TCP or UDP are + not supported. - This feature doesn't work with Enhanced Container Isolation enabled, since -isolating your containers from the host and allowing them access to the host -network contradict each other. + isolating your containers from the host and allowing them access to the host + network contradict each other. - Only Linux containers are supported. Host networking does not work with Windows containers. +## Usage example + +This example shows how to start an Nginx container that binds directly to port +80 on the Docker host. From a networking perspective, this provides the same +level of isolation as if Nginx were running directly on the host, but the +container remains isolated in all other aspects (storage, process namespace, +user namespace). + +### Prerequisites + +- Port 80 must be available on the Docker host. To make Nginx listen on a + different port, see the [Nginx image documentation](https://hub.docker.com/_/nginx/). +- The host networking driver only works on Linux hosts, and as an opt-in + feature in Docker Desktop version 4.34 and later. + +### Steps + +1. Create and start the container as a detached process. The `--rm` option + removes the container when it exits. The `-d` flag starts it in the + background: + + ```console + $ docker run --rm -d --network host --name my_nginx nginx + ``` + +2. Access Nginx by browsing to [http://localhost:80/](http://localhost:80/). + +3. Examine your network stack: + + Check all network interfaces and verify that no new interface was created: + + ```console + $ ip addr show + ``` + + Verify which process is bound to port 80 using `netstat`. You need `sudo` + because the process is owned by the Docker daemon user: + + ```console + $ sudo netstat -tulpn | grep :80 + ``` + +4. Stop the container. It's removed automatically because of the `--rm` option: + + ```console + $ docker container stop my_nginx + ``` + ## Next steps -- Go through the [host networking tutorial](/manuals/engine/network/tutorials/host.md) - Learn about [networking from the container's point of view](../_index.md) - Learn about [bridge networks](./bridge.md) - Learn about [overlay networks](./overlay.md) diff --git a/content/manuals/engine/network/drivers/ipvlan.md b/content/manuals/engine/network/drivers/ipvlan.md index c5adad016eb..2bcd17f27eb 100644 --- a/content/manuals/engine/network/drivers/ipvlan.md +++ b/content/manuals/engine/network/drivers/ipvlan.md @@ -1,18 +1,19 @@ --- title: IPvlan network driver -description: All about using IPvlan to make your containers appear like physical machines +description: + All about using IPvlan to make your containers appear like physical machines on the network keywords: network, ipvlan, l2, l3, standalone aliases: -- /network/ipvlan/ -- /network/drivers/ipvlan/ + - /network/ipvlan/ + - /network/drivers/ipvlan/ --- The IPvlan driver gives users total control over both IPv4 and IPv6 addressing. 
The VLAN driver builds on top of that in giving operators complete control of layer 2 VLAN tagging and even IPvlan L3 routing for users interested in underlay network integration. For overlay deployments that abstract away physical constraints -see the [multi-host overlay](/manuals/engine/network/tutorials/overlay.md) driver. +see the [multi-host overlay](overlay.md) driver. IPvlan is a new twist on the tried and true network virtualization technique. The Linux implementations are extremely lightweight because rather than using @@ -52,7 +53,7 @@ The following table describes the driver-specific options that you can pass to `docker network create` all together and the driver will create a `dummy` interface that will enable local host connectivity to perform the examples. - Kernel requirements: - - IPvlan Linux kernel v4.2+ (support for earlier kernels exists but is buggy). To check your current kernel version, use `uname -r` + - IPvlan Linux kernel v4.2+ (support for earlier kernels exists but is buggy). To check your current kernel version, use `uname -r` ### IPvlan L2 mode example usage @@ -292,11 +293,11 @@ as parent interfaces. Example mappings from NetOps to Docker network commands are as follows: - VLAN: 10, Subnet: 172.16.80.0/24, Gateway: 172.16.80.1 - - `--subnet=172.16.80.0/24 --gateway=172.16.80.1 -o parent=eth0.10` + - `--subnet=172.16.80.0/24 --gateway=172.16.80.1 -o parent=eth0.10` - VLAN: 20, IP subnet: 172.16.50.0/22, Gateway: 172.16.50.1 - - `--subnet=172.16.50.0/22 --gateway=172.16.50.1 -o parent=eth0.20` + - `--subnet=172.16.50.0/22 --gateway=172.16.50.1 -o parent=eth0.20` - VLAN: 30, Subnet: 10.1.100.0/16, Gateway: 10.1.100.1 - - `--subnet=10.1.100.0/16 --gateway=10.1.100.1 -o parent=eth0.30` + - `--subnet=10.1.100.0/16 --gateway=10.1.100.1 -o parent=eth0.30` ### IPvlan L3 mode example @@ -533,7 +534,7 @@ in order to forward broadcast and multicast packets. $ docker network create -d ipvlan \ --subnet=192.168.110.0/24 \ --subnet=192.168.112.0/24 \ - --subnet=2001:db8:abc6::/64 \ + --ipv6 --subnet=2001:db8:abc6::/64 \ -o parent=eth0 \ -o ipvlan_mode=l3 ipnet110 diff --git a/content/manuals/engine/network/drivers/macvlan.md b/content/manuals/engine/network/drivers/macvlan.md index 43e67a7a04e..88279aec6b3 100644 --- a/content/manuals/engine/network/drivers/macvlan.md +++ b/content/manuals/engine/network/drivers/macvlan.md @@ -1,13 +1,15 @@ --- title: Macvlan network driver -description: All about using Macvlan to make your containers appear like physical +description: + All about using Macvlan to make your containers appear like physical machines on the network keywords: network, macvlan, standalone aliases: -- /config/containers/macvlan/ -- /engine/userguide/networking/get-started-macvlan/ -- /network/macvlan/ -- /network/drivers/macvlan/ + - /config/containers/macvlan/ + - /engine/userguide/networking/get-started-macvlan/ + - /network/macvlan/ + - /network/drivers/macvlan/ + - /engine/network/tutorials/macvlan/ --- Some applications, especially legacy applications or applications which monitor @@ -19,7 +21,17 @@ case, you need to designate a physical interface on your Docker host to use for the Macvlan, as well as the subnet and gateway of the network. You can even isolate your Macvlan networks using different physical network interfaces. -Keep the following things in mind: +## Platform support and requirements + +- The macvlan driver only works on Linux hosts. It is not supported on + Docker Desktop for Mac or Windows, or Docker Engine on Windows. 
+- Most cloud providers block macvlan networking. You may need physical access to + your networking equipment. +- Requires at least Linux kernel version 3.9 (version 4.0 or later is + recommended). +- The macvlan driver is not supported in rootless mode. + +## Considerations - You may unintentionally degrade your network due to IP address exhaustion or to "VLAN spread", a situation that occurs when you have an @@ -32,6 +44,13 @@ Keep the following things in mind: overlay (to communicate across multiple Docker hosts), these solutions may be better in the long term. +- Containers attached to a macvlan network cannot communicate with the host + directly, this is a restriction in the Linux kernel. If you need communication + between the host and the containers, you can connect the containers to a + bridge network as well as the macvlan. It is also possible to create a + macvlan interface on the host with the same parent interface, and assign it + an IP address in the Docker network's subnet. + ## Options The following table describes the driver-specific options that you can pass to @@ -94,15 +113,23 @@ $ docker network create -d macvlan \ ### Use an IPvlan instead of Macvlan -In the above example, you are still using a L3 bridge. You can use `ipvlan` -instead, and get an L2 bridge. Specify `-o ipvlan_mode=l2`. +An `ipvlan` network created with option `-o ipvlan_mode=l2` is similar +to a macvlan network. The main difference is that the `ipvlan` driver +doesn't assign a MAC address to each container, the layer-2 network stack +is shared by devices in the ipvlan network. So, containers use the parent +interface's MAC address. + +The network will see fewer MAC addresses, and the host's MAC address will be +associated with the IP address of each container. + +The choice of network type depends on your environment and requirements. +There are some notes about the trade-offs in the [Linux kernel +documentation](https://docs.kernel.org/networking/ipvlan.html#what-to-choose-macvlan-vs-ipvlan). ```console $ docker network create -d ipvlan \ --subnet=192.168.210.0/24 \ - --subnet=192.168.212.0/24 \ --gateway=192.168.210.254 \ - --gateway=192.168.212.254 \ -o ipvlan_mode=l2 -o parent=eth0 ipvlan210 ``` @@ -120,7 +147,167 @@ $ docker network create -d macvlan \ -o macvlan_mode=bridge macvlan216 ``` -## Next steps +## Usage examples + +This section provides hands-on examples for working with macvlan networks, +including bridge mode and 802.1Q trunk bridge mode. + +> [!NOTE] +> These examples assume your ethernet interface is `eth0`. If your device has a +> different name, use that instead. + +### Bridge mode example + +In bridge mode, your traffic flows through `eth0` and Docker routes traffic to +your container using its MAC address. To network devices on your network, your +container appears to be physically attached to the network. + +1. Create a macvlan network called `my-macvlan-net`. Modify the `subnet`, + `gateway`, and `parent` values to match your environment: + + ```console + $ docker network create -d macvlan \ + --subnet=172.16.86.0/24 \ + --gateway=172.16.86.1 \ + -o parent=eth0 \ + my-macvlan-net + ``` + + Verify the network was created: + + ```console + $ docker network ls + $ docker network inspect my-macvlan-net + ``` + +2. Start an `alpine` container and attach it to the `my-macvlan-net` network. + The `-dit` flags start the container in the background. 
The `--rm` flag + removes the container when it stops: + + ```console + $ docker run --rm -dit \ + --network my-macvlan-net \ + --name my-macvlan-alpine \ + alpine:latest \ + ash + ``` + +3. Inspect the container and notice the `MacAddress` key within the `Networks` + section: + + ```console + $ docker container inspect my-macvlan-alpine + ``` + + Look for output similar to: + + ```json + "Networks": { + "my-macvlan-net": { + "Gateway": "172.16.86.1", + "IPAddress": "172.16.86.2", + "IPPrefixLen": 24, + "MacAddress": "02:42:ac:10:56:02", + ... + } + } + ``` + +4. Check how the container sees its own network interfaces: + + ```console + $ docker exec my-macvlan-alpine ip addr show eth0 + + 9: eth0@tunl0: mtu 1500 qdisc noqueue state UP + link/ether 02:42:ac:10:56:02 brd ff:ff:ff:ff:ff:ff + inet 172.16.86.2/24 brd 172.16.86.255 scope global eth0 + valid_lft forever preferred_lft forever + ``` + + Check the routing table: + + ```console + $ docker exec my-macvlan-alpine ip route + + default via 172.16.86.1 dev eth0 + 172.16.86.0/24 dev eth0 scope link src 172.16.86.2 + ``` + +5. Stop the container (Docker removes it automatically) and remove the network: + + ```console + $ docker container stop my-macvlan-alpine + $ docker network rm my-macvlan-net + ``` + +### 802.1Q trunked bridge mode example + +In 802.1Q trunk bridge mode, your traffic flows through a sub-interface of +`eth0` (called `eth0.10`) and Docker routes traffic to your container using its +MAC address. To network devices on your network, your container appears to be +physically attached to the network. + +1. Create a macvlan network called `my-8021q-macvlan-net`. Modify the `subnet`, + `gateway`, and `parent` values to match your environment: + + ```console + $ docker network create -d macvlan \ + --subnet=172.16.86.0/24 \ + --gateway=172.16.86.1 \ + -o parent=eth0.10 \ + my-8021q-macvlan-net + ``` + + Verify the network was created and has parent `eth0.10`. You can use `ip addr +show` on the Docker host to verify that the interface `eth0.10` exists: + + ```console + $ docker network ls + $ docker network inspect my-8021q-macvlan-net + ``` + +2. Start an `alpine` container and attach it to the `my-8021q-macvlan-net` + network: + + ```console + $ docker run --rm -itd \ + --network my-8021q-macvlan-net \ + --name my-second-macvlan-alpine \ + alpine:latest \ + ash + ``` + +3. Inspect the container and notice the `MacAddress` key: + + ```console + $ docker container inspect my-second-macvlan-alpine + ``` + + Look for the `Networks` section with the MAC address. + +4. Check how the container sees its own network interfaces: + + ```console + $ docker exec my-second-macvlan-alpine ip addr show eth0 + + 11: eth0@if10: mtu 1500 qdisc noqueue state UP + link/ether 02:42:ac:10:56:02 brd ff:ff:ff:ff:ff:ff + inet 172.16.86.2/24 brd 172.16.86.255 scope global eth0 + valid_lft forever preferred_lft forever + ``` + + Check the routing table: + + ```console + $ docker exec my-second-macvlan-alpine ip route + + default via 172.16.86.1 dev eth0 + 172.16.86.0/24 dev eth0 scope link src 172.16.86.2 + ``` + +5. Stop the container and remove the network: -Learn how to use the Macvlan driver in the -[Macvlan networking tutorial](/manuals/engine/network/tutorials/macvlan.md). 
+ ```console + $ docker container stop my-second-macvlan-alpine + $ docker network rm my-8021q-macvlan-net + ``` diff --git a/content/manuals/engine/network/drivers/none.md b/content/manuals/engine/network/drivers/none.md index 9a42ca3cb3c..d0715b1ff4f 100644 --- a/content/manuals/engine/network/drivers/none.md +++ b/content/manuals/engine/network/drivers/none.md @@ -3,8 +3,8 @@ title: None network driver description: How to isolate the networking stack of a container using the none driver keywords: network, none, standalone aliases: -- /network/none/ -- /network/drivers/none/ + - /network/none/ + - /network/drivers/none/ --- If you want to completely isolate the networking stack of a container, you can @@ -32,8 +32,8 @@ $ docker run --rm --network none --name no-net-alpine alpine:latest ip addr show ## Next steps -- Go through the [host networking tutorial](/manuals/engine/network/tutorials/host.md) - Learn about [networking from the container's point of view](../_index.md) +- Learn about [host networking](host.md) - Learn about [bridge networks](bridge.md) - Learn about [overlay networks](overlay.md) - Learn about [Macvlan networks](macvlan.md) diff --git a/content/manuals/engine/network/drivers/overlay.md b/content/manuals/engine/network/drivers/overlay.md index 043e2ddc3ec..e9f2daafc09 100644 --- a/content/manuals/engine/network/drivers/overlay.md +++ b/content/manuals/engine/network/drivers/overlay.md @@ -3,10 +3,12 @@ title: Overlay network driver description: All about using overlay networks keywords: network, overlay, user-defined, swarm, service aliases: -- /config/containers/overlay/ -- /engine/userguide/networking/overlay-security-model/ -- /network/overlay/ -- /network/drivers/overlay/ + - /config/containers/overlay/ + - /engine/userguide/networking/overlay-security-model/ + - /network/overlay/ + - /network/drivers/overlay/ + - /engine/network/tutorials/overlay/ + - /engine/userguide/networking/get-started-overlay/ --- The `overlay` network driver creates a distributed network among multiple @@ -29,16 +31,26 @@ This page describes overlay networks in general, and when used with standalone containers. For information about overlay for Swarm services, see [Manage Swarm service networks](/manuals/engine/swarm/networking.md). +## Requirements + +Docker hosts must be part of a swarm to use overlay networks, even when +connecting standalone containers. The following ports must be open between +participating hosts: + +- `2377/tcp`: Swarm control plane (configurable) +- `4789/udp`: Overlay traffic (configurable) +- `7946/tcp` and `7946/udp`: Node communication (not configurable) + ## Create an overlay network -Before you start, you must ensure that participating nodes can communicate over the network. 
-The following table lists ports that need to be open to each host participating in an overlay network: +The following table lists the ports that need to be open to each host +participating in an overlay network: -| Ports | Description | -| :--------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `2377/tcp` | The default Swarm control plane port, is configurable with [`docker swarm join --listen-addr`](/reference/cli/docker/swarm/join.md#--listen-addr-value) | -| `4789/udp` | The default overlay traffic port, configurable with [`docker swarm init --data-path-addr`](/reference/cli/docker/swarm/init.md#data-path-port) | -| `7946/tcp`, `7946/udp` | Used for communication among nodes, not configurable | +| Ports | Description | +| :--------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `2377/tcp` | The default Swarm control plane port, is configurable with [`docker swarm join --listen-addr`](/reference/cli/docker/swarm/join/#listen-addr) | +| `4789/udp` | The default overlay traffic port, configurable with [`docker swarm init --data-path-addr`](/reference/cli/docker/swarm/init/#data-path-port) | +| `7946/tcp`, `7946/udp` | Used for communication among nodes, not configurable | To create an overlay network that containers on other Docker hosts can connect to, run the following command: @@ -122,9 +134,357 @@ the same host. For more information about this limitation, see [moby/moby#44973](https://github.com/moby/moby/issues/44973#issuecomment-1543747718). +## Usage examples + +This section provides hands-on examples for working with overlay networks. These +examples cover swarm services and standalone containers on multiple Docker hosts. + +### Prerequisites + +All examples require at least a single-node swarm. Initialize one by running +`docker swarm init` on the host. You can run these examples on multi-node +swarms as well. + +### Use the default overlay network + +This example shows how the default overlay network works with swarm services. +You'll create an `nginx` service and examine the network from the service +containers' perspective. + +#### Prerequisites for multi-node setup + +This walkthrough requires three Docker hosts that can communicate with each +other on the same network with no firewall blocking traffic between them: + +- `manager`: Functions as both manager and worker +- `worker-1`: Functions as worker only +- `worker-2`: Functions as worker only + +If you don't have three hosts available, you can set up three virtual machines +on a cloud provider with Docker installed. + +#### Create the swarm + +1. On `manager`, initialize the swarm. If the host has one network interface, + the `--advertise-addr` flag is optional: + + ```console + $ docker swarm init --advertise-addr= + ``` + + Save the join token displayed for use with workers. + +2. On `worker-1`, join the swarm: + + ```console + $ docker swarm join --token \ + --advertise-addr \ + :2377 + ``` + +3. On `worker-2`, join the swarm: + + ```console + $ docker swarm join --token \ + --advertise-addr \ + :2377 + ``` + +4. 
On `manager`, list all nodes: + + ```console + $ docker node ls + + ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS + d68ace5iraw6whp7llvgjpu48 * ip-172-31-34-146 Ready Active Leader + nvp5rwavvb8lhdggo8fcf7plg ip-172-31-35-151 Ready Active + ouvx2l7qfcxisoyms8mtkgahw ip-172-31-36-89 Ready Active + ``` + + Filter by role if needed: + + ```console + $ docker node ls --filter role=manager + $ docker node ls --filter role=worker + ``` + +5. List Docker networks on all hosts. Each now has an overlay network called + `ingress` and a bridge network called `docker_gwbridge`: + + ```console + $ docker network ls + + NETWORK ID NAME DRIVER SCOPE + 495c570066be bridge bridge local + 961c6cae9945 docker_gwbridge bridge local + ff35ceda3643 host host local + trtnl4tqnc3n ingress overlay swarm + c8357deec9cb none null local + ``` + +The `docker_gwbridge` connects the `ingress` network to the Docker host's +network interface. If you create services without specifying a network, they +connect to `ingress`. It's recommended to use separate overlay networks for each +application or group of related applications. + +#### Create the services + +1. On `manager`, create a new overlay network: + + ```console + $ docker network create -d overlay nginx-net + ``` + + The overlay network is automatically created on worker nodes when they run + service tasks that need it. + +2. On `manager`, create a 5-replica Nginx service connected to `nginx-net`: + + > [!NOTE] + > Services can only be created on a manager. + + ```console + $ docker service create \ + --name my-nginx \ + --publish target=80,published=80 \ + --replicas=5 \ + --network nginx-net \ + nginx + ``` + + The default `ingress` publish mode means you can browse to port 80 on any + node and connect to one of the 5 service tasks, even if no tasks run on that + node. + +3. Monitor service creation progress: + + ```console + $ docker service ls + ``` + +4. Inspect the `nginx-net` network on all hosts. The `Containers` section lists + all service tasks connected to the overlay network from that host. + +5. From `manager`, inspect the service: + + ```console + $ docker service inspect my-nginx + ``` + + Notice the information about ports and endpoints. + +6. Create a second network and update the service to use it: + + ```console + $ docker network create -d overlay nginx-net-2 + $ docker service update \ + --network-add nginx-net-2 \ + --network-rm nginx-net \ + my-nginx + ``` + +7. Verify the update completed: + + ```console + $ docker service ls + ``` + + Inspect both networks to verify containers moved from `nginx-net` to + `nginx-net-2`. + + > [!NOTE] + > Overlay networks are automatically created on swarm worker nodes as needed, + > but aren't automatically removed. + +8. Clean up: + + ```console + $ docker service rm my-nginx + $ docker network rm nginx-net nginx-net-2 + ``` + +### Use a user-defined overlay network + +This example shows the recommended approach for production services using custom +overlay networks. + +#### Prerequisites + +This assumes the swarm is already set up and you're on a manager node. + +#### Steps + +1. Create a user-defined overlay network: + + ```console + $ docker network create -d overlay my-overlay + ``` + +2. Start a service using the overlay network, publishing port 80 to port 8080: + + ```console + $ docker service create \ + --name my-nginx \ + --network my-overlay \ + --replicas 1 \ + --publish published=8080,target=80 \ + nginx:latest + ``` + +3. 
Verify the service task is connected to the network: + + ```console + $ docker network inspect my-overlay + ``` + + Check the `Containers` section for the `my-nginx` service task. + +4. Clean up: + + ```console + $ docker service rm my-nginx + $ docker network rm my-overlay + ``` + +### Use an overlay network for standalone containers + +This example demonstrates DNS container discovery between standalone containers +on different Docker hosts using an overlay network. + +#### Prerequisites + +You need two Docker hosts that can communicate with each other with the +following ports open between them: + +- TCP port 2377 +- TCP and UDP port 7946 +- UDP port 4789 + +This example refers to the hosts as `host1` and `host2`. + +#### Steps + +1. Set up the swarm: + + On `host1`, initialize a swarm: + + ```console + $ docker swarm init + Swarm initialized: current node (vz1mm9am11qcmo979tlrlox42) is now a manager. + + To add a worker to this swarm, run the following command: + + docker swarm join --token SWMTKN-1-5g90q48weqrtqryq4kj6ow0e8xm9wmv9o6vgqc5j320ymybd5c-8ex8j0bc40s6hgvy5ui5gl4gy 172.31.47.252:2377 + ``` + + On `host2`, join the swarm using the token from the previous output: + + ```console + $ docker swarm join --token :2377 + This node joined a swarm as a worker. + ``` + + If the join fails, run `docker swarm leave --force` on `host2`, verify + network and firewall settings, and try again. + +2. On `host1`, create an attachable overlay network: + + ```console + $ docker network create --driver=overlay --attachable test-net + uqsof8phj3ak0rq9k86zta6ht + ``` + + Note the returned network ID. + +3. On `host1`, start an interactive container that connects to `test-net`: + + ```console + $ docker run -it --name alpine1 --network test-net alpine + / # + ``` + +4. On `host2`, list available networks. Notice that `test-net` doesn't exist yet: + + ```console + $ docker network ls + NETWORK ID NAME DRIVER SCOPE + ec299350b504 bridge bridge local + 66e77d0d0e9a docker_gwbridge bridge local + 9f6ae26ccb82 host host local + omvdxqrda80z ingress overlay swarm + b65c952a4b2b none null local + ``` + +5. On `host2`, start a detached, interactive container that connects to + `test-net`: + + ```console + $ docker run -dit --name alpine2 --network test-net alpine + fb635f5ece59563e7b8b99556f816d24e6949a5f6a5b1fbd92ca244db17a4342 + ``` + + > [!NOTE] + > Automatic DNS container discovery only works with unique container names. + +6. On `host2`, verify that `test-net` was created with the same network ID as on + `host1`: + + ```console + $ docker network ls + NETWORK ID NAME DRIVER SCOPE + ... + uqsof8phj3ak test-net overlay swarm + ``` + +7. On `host1`, ping `alpine2` from within `alpine1`: + + ```console + / # ping -c 2 alpine2 + PING alpine2 (10.0.0.5): 56 data bytes + 64 bytes from 10.0.0.5: seq=0 ttl=64 time=0.600 ms + 64 bytes from 10.0.0.5: seq=1 ttl=64 time=0.555 ms + + --- alpine2 ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max = 0.555/0.577/0.600 ms + ``` + + The two containers communicate over the overlay network connecting the two + hosts. You can also run another container on `host2` and ping `alpine1`: + + ```console + $ docker run -it --rm --name alpine3 --network test-net alpine + / # ping -c 2 alpine1 + / # exit + ``` + +8. On `host1`, close the `alpine1` session (which stops the container): + + ```console + / # exit + ``` + +9. Clean up. 
You must stop and remove containers on each host independently: + + On `host2`: + + ```console + $ docker container stop alpine2 + $ docker network ls + $ docker container rm alpine2 + ``` + + When you stop `alpine2`, `test-net` disappears from `host2`. + + On `host1`: + + ```console + $ docker container rm alpine1 + $ docker network rm test-net + ``` + ## Next steps -- Go through the [overlay networking tutorial](/manuals/engine/network/tutorials/overlay.md) - Learn about [networking from the container's point of view](../_index.md) - Learn about [standalone bridge networks](bridge.md) - Learn about [Macvlan networks](macvlan.md) diff --git a/content/manuals/engine/network/firewall-iptables.md b/content/manuals/engine/network/firewall-iptables.md new file mode 100644 index 00000000000..87d63e9cbbd --- /dev/null +++ b/content/manuals/engine/network/firewall-iptables.md @@ -0,0 +1,132 @@ +--- +title: Docker with iptables +weight: 10 +description: How Docker works with iptables +keywords: network, iptables, firewall +--- + +Docker creates iptables rules in the host's network namespace for bridge +networks. For bridge and other network types, iptables rules for DNS are +also created in the container's network namespace. + +Creation of iptables rules can be disabled using daemon options `iptables` +and `ip6tables`, see [Prevent Docker from manipulating firewall rules](packet-filtering-firewalls.md#prevent-docker-from-manipulating-firewall-rules). +However, this is not recommended for most users as it will likely break +container networking. + +### Docker and iptables chains + +To support bridge and overlay networks, Docker creates the following custom +`iptables` chains in the `filter` table: + +* `DOCKER-USER` + * A placeholder for user-defined rules that will be processed before rules + in the `DOCKER-FORWARD` and `DOCKER` chains. +* `DOCKER-FORWARD` + * The first stage of processing for Docker's networks. Rules that pass packets + that are not related to established connections to the other Docker chains, + as well as rules to accept packets that are part of established connections. +* `DOCKER`, `DOCKER-BRIDGE`, `DOCKER-INTERNAL` + * Rules that determine whether a packet that is not part of an established + connection should be accepted, based on the port forwarding configuration + of running containers. +* `DOCKER-CT` + * Per-bridge connection tracking rules. +* `DOCKER-INGRESS` + * Rules related to Swarm networking. + +In the `FORWARD` chain, Docker adds rules that unconditionally jump to the +`DOCKER-USER`, `DOCKER-FORWARD` and `DOCKER-INGRESS` chains. + +In the `nat` table, Docker creates chain `DOCKER` and adds rules to implement +masquerading and port-mapping. + +Docker requires IP Forwarding to be enabled on the host for its default +bridge network configuration. If it enables IP Forwarding, it also sets the +default policy of the iptables `FORWARD` chain in the `filter` table to `DROP`. + +### Add iptables policies before Docker's rules + +Packets that get accepted or rejected by rules in these custom chains will not +be seen by user-defined rules appended to the `FORWARD` chain. So, to add +additional rules to filter these packets, use the `DOCKER-USER` chain. + +Rules appended to the `FORWARD` chain will be processed after Docker's rules. + +### Match the original IP and ports for requests + +When packets arrive to the `DOCKER-USER` chain, they have already passed through +a Destination Network Address Translation (DNAT) filter. 
That means that the +`iptables` flags you use can only match internal IP addresses and ports of +containers. + +If you want to match traffic based on the original IP and port in the network +request, you must use the +[`conntrack` iptables extension](https://ipset.netfilter.org/iptables-extensions.man.html#lbAO). +For example: + +```console +$ sudo iptables -I DOCKER-USER -p tcp -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT +$ sudo iptables -I DOCKER-USER -p tcp -m conntrack --ctorigdst 198.51.100.2 --ctorigdstport 80 -j ACCEPT +``` + +> [!IMPORTANT] +> +> Using the `conntrack` extension may result in degraded performance. + +### Allow forwarding between host interfaces + +If Docker has set the default policy of the `FORWARD` chain in the `filter` +table to `DROP`, a rule in `DOCKER-USER` can be used to allow forwarding +between host interfaces. For example: + +```console +$ iptables -I DOCKER-USER -i src_if -o dst_if -j ACCEPT +``` + +### Restrict external connections to containers + +By default, all external source IPs are allowed to connect to ports that have +been published to the Docker host's addresses. + +To allow only a specific IP or network to access the containers, insert a +negated rule at the top of the `DOCKER-USER` filter chain. For example, the +following rule drops packets from all IP addresses except `192.0.2.2`: + +```console +$ iptables -I DOCKER-USER -i ext_if ! -s 192.0.2.2 -j DROP +``` + +You will need to change `ext_if` to correspond with your +host's actual external interface. You could instead allow connections from a +source subnet. The following rule only allows access from the subnet `192.0.2.0/24`: + +```console +$ iptables -I DOCKER-USER -i ext_if ! -s 192.0.2.0/24 -j DROP +``` + +Finally, you can specify a range of IP addresses to accept using `--src-range` +(Remember to also add `-m iprange` when using `--src-range` or `--dst-range`): + +```console +$ iptables -I DOCKER-USER -m iprange -i ext_if ! --src-range 192.0.2.1-192.0.2.3 -j DROP +``` + +You can combine `-s` or `--src-range` with `-d` or `--dst-range` to control both +the source and destination. For example, if the Docker host has addresses +`2001:db8:1111::2` and `2001:db8:2222::2`, you can make rules specific to +`2001:db8:1111::2` and leave `2001:db8:2222::2` open. + +You may need to allow responses from servers outside the permitted external address +ranges. For example, containers may send DNS or HTTP requests to hosts that are +not allowed to access the container's services. The following rule accepts any +incoming or outgoing packet belonging to a flow that has already been accepted +by other rules. It must be placed before `DROP` rules that restrict access from +external address ranges. + +```console +$ iptables -I DOCKER-USER -m state --state RELATED,ESTABLISHED -j ACCEPT +``` + +For more information about iptables configuration and advanced usage, +refer to the [Netfilter.org HOWTO](https://www.netfilter.org/documentation/HOWTO/NAT-HOWTO.html). 
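To make the earlier point about combining source and destination matches concrete, here is one possible sketch. It assumes the external interface is `ext_if` and that only clients in `2001:db8:aaaa::/64` (a placeholder prefix) should reach services published on `2001:db8:1111::2`; because packets in `DOCKER-USER` have already been through DNAT, the original destination is matched with the `conntrack` extension:

```console
$ # ext_if and 2001:db8:aaaa::/64 are placeholders; adjust them for your host
$ ip6tables -I DOCKER-USER -i ext_if -m conntrack --ctorigdst 2001:db8:1111::2 ! -s 2001:db8:aaaa::/64 -j DROP
```

Traffic to ports published on `2001:db8:2222::2` is not affected, because the rule only matches flows whose original destination address was `2001:db8:1111::2`.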
diff --git a/content/manuals/engine/network/firewall-nftables.md b/content/manuals/engine/network/firewall-nftables.md new file mode 100644 index 00000000000..f868e6c95e7 --- /dev/null +++ b/content/manuals/engine/network/firewall-nftables.md @@ -0,0 +1,275 @@ +--- +title: Docker with nftables +weight: 10 +description: How Docker works with nftables +keywords: network, nftables, firewall +--- + +> [!WARNING] +> +> Support for nftables introduced in Docker 29.0.0 is experimental, configuration +> options, behavior and implementation may all change in future releases. +> The rules for overlay networks have not yet been migrated from iptables. +> Therefore, nftables cannot be enabled when the Docker daemon is running in +> Swarm mode. + +To use nftables instead of iptables, use Docker Engine option +`--firewall-backend=nftables` on its command line, or `"firewall-backend": "nftables"` +in its configuration file. You may also need to modify IP forwarding configuration +on the host, and migrate rules from the iptables `DOCKER-USER` chain, see +[migrating from iptables to nftables](#migrating-from-iptables-to-nftables). + +For bridge networks, Docker creates nftables rules in the host's network +namespace. For bridge and other network types, nftables rules for DNS are +also created in the container's network namespace. + +Creation of nftables rules can be disabled using daemon options `iptables` +and `ip6tables`. _These options apply to both iptables and nftables._ +See [Prevent Docker from manipulating firewall rules](packet-filtering-firewalls.md#prevent-docker-from-manipulating-firewall-rules). +However, this is not recommended for most users as it will likely break +container networking. + +## Docker's nftables tables + +For bridge networks, Docker creates two tables, `ip docker-bridges` and +`ip6 docker-bridges`. + +Each table contains a number of [base chains](https://wiki.nftables.org/wiki-nftables/index.php/Configuring_chains#Adding_base_chains), +and further chains are added for each bridge network. The moby project +has some [internal documentation](https://github.com/moby/moby/blob/master/integration/network/bridge/nftablesdoc/index.md) +describing its nftables, and how they depend on network and container +configuration. However, the tables and their rules are likely to change +between Docker Engine releases. + +> [!NOTE] +> +> Do not modify Docker's tables directly as the modifications are likely to +> be lost, Docker expects to have full ownership of its tables. + +> [!NOTE] +> +> Because iptables has a fixed set of chains, equivalent to nftables base +> chains, all rules are included in those chains. The `DOCKER-USER` chain +> is supplied as a way to insert rules into the `filter` table's `FORWARD` +> chain, to run before Docker's rules. +> In Docker's nftables implementation, there is no `DOCKER-USER` chain. +> Instead, rules can be added in separate tables, with base chains that +> have the same types and hook points as Docker's base chains. If necessary, +> [base chain priority](https://wiki.nftables.org/wiki-nftables/index.php/Configuring_chains#Base_chain_priority) +> can be used to tell nftables which order to call the chains in. +> Docker uses well known [priority values](https://wiki.nftables.org/wiki-nftables/index.php/Netfilter_hooks#Priority_within_hook) for each of its base chains. 
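If you want to see what the daemon has actually programmed, you can list its tables read-only with `nft`. This is purely for inspection; as noted above, the chains and rules inside them are implementation details that can change between releases:

```console
$ # inspect only; do not edit these tables
$ nft list table ip docker-bridges
$ nft list table ip6 docker-bridges
```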
+ +## Migrating from iptables to nftables + +If the Docker daemon has been running with the iptables firewall backend, +restarting it with the nftables backend will delete most of Docker's iptables +chains and rules, and create nftables rules instead. + +If IP forwarding is not enabled, Docker will report an error when creating +a bridge network that needs it. Because of the default bridge, if IPv4 +forwarding is disabled, the error will be reported during daemon startup. +See [IP forwarding](#ip-forwarding). + +If you have rules in the `DOCKER-USER` chain, see [Migrating +`DOCKER-USER`](#migrating-docker-user). + +You may need to manually update the iptables `FORWARD` policy if it has +been set to `DROP` by Docker with iptables, or as part of your host's +firewall configuration. See [FORWARD policy in iptables](#forward-policy-in-iptables). + +### IP forwarding + +IP forwarding on the Docker host enables Docker functionality including port +publishing, communication between bridge networks, and direct routing from +outside the host to containers in bridge networks. + +When running with iptables, depending on network and daemon configuration, +Docker may enable IPv4 and IPv6 forwarding on the host. + +With its nftables firewall backend enabled, Docker will not enable IP forwarding +itself. It will report an error if forwarding is needed, but not already enabled. +To disable Docker's check for IP forwarding, letting it start and create networks +when it determines that forwarding is disabled, use Daemon option `--ip-forward=false`, +or `"ip-forward": false` in its configuration file. + +> [!WARNING] +> +> When enabling IP forwarding, make sure you have firewall rules to block +> unwanted forwarding between non-Docker interfaces. + +> [!NOTE] +> +> If you stop Docker to migrate to nftables, Docker may have already enabled +> IP forwarding on your system. After a reboot, if no other service re-enables +> forwarding, Docker will fail to start. + +If Docker is in a VM that has a single network interface and no other +software running, there is probably no unwanted forwarding to block. +But, on a physical host with multiple network interfaces, forwarding +between those interfaces should probably be blocked with nftables rules +unless the host is acting as a router. + +To enable IP forwarding on the host, set the following sysctls: + +- `net.ipv4.ip_forward=1` +- `net.ipv6.conf.all.forwarding=1` + +If your host uses `systemd`, you may be able to use `systemd-sysctl`. For +example, by editing `/etc/sysctl.d/99-sysctl.conf`. + +If the host is running `firewalld`, you may be able to use it to block +unwanted forwarding. Docker's bridges are in a firewalld zone called +`docker`, it creates a forwarding policy called `docker-forwarding` that +accepts forwarding from `ANY` zone to the `docker` zone. + +For example, to use nftables to block forwarding between interfaces `eth0` +and `eth1`, you could use: + +```console +table inet no-ext-forwarding { + chain no-ext-forwarding { + type filter hook forward priority filter; policy accept; + iifname "eth0" oifname "eth1" drop + iifname "eth1" oifname "eth0" drop + } +} +``` + +### FORWARD policy in iptables + +An iptables chain with `FORWARD` policy `DROP` will drop packets that have +been accepted by Docker's nftables rules, because the packet will be processed +by the iptables chains as well as Docker's nftables chains. 
+ +Some features, including port publishing, will not work unless the `DROP` +policy is removed, or additional iptables rules are added to the iptables +`FORWARD` chain to accept Docker-related traffic. + +When Docker is using iptables, and it enables IP forwarding on the host, +it sets the default policy of the iptables `FORWARD` chain to `DROP`. So, +if you stop Docker to migrate to nftables, it may have set a `DROP` that +you need to remove. It will be removed anyway on reboot. + +To keep using rules in `DOCKER-USER` that rely on the chain having policy +`DROP`, you must add explicit `ACCEPT` rules for Docker related traffic. + +To check the current iptables `FORWARD` policy, use: + +```console +$ iptables -L FORWARD +Chain FORWARD (policy DROP) +target prot opt source destination +$ ip6tables -L FORWARD +Chain FORWARD (policy ACCEPT) +target prot opt source destination +``` + +To set the iptables policies to `ACCEPT` for IPv4 and IPv6: + +```console +$ iptables -P FORWARD ACCEPT +$ ip6tables -P FORWARD ACCEPT +``` + +### Migrating `DOCKER-USER` + +With firewall backend "iptables", rules added to the iptables `DOCKER-USER` +are processed before Docker's rules in the filter table's `FORWARD` chain. + +When starting the daemon with nftables after running with iptables, Docker +will not remove the jump from the `FORWARD` chain to `DOCKER-USER`. So, +rules created in `DOCKER-USER` will continue to run until the jump is +removed or the host is rebooted. + +When starting with nftables, the daemon will not add the jump. So, unless +there is an existing jump, rules in `DOCKER-USER` will be ignored. + +#### Migrating ACCEPT rules + +Some rules in the `DOCKER-USER` chain will continue to work. For example, if a +packet is dropped, it will be dropped before or after the nftables rules in +Docker's `filter-FORWARD` chain. But other rules, particularly `ACCEPT` rules +to override Docker's `DROP` rules, will not work. + +In nftables, an "accept" rule is not final. It terminates processing +for its base chain, but the accepted packet will still be processed by +other base chains, which may drop it. + +To override Docker's `drop` rule, you must use a firewall mark. Select a +mark not already in use on your host, and use Docker Engine option +`--bridge-accept-fwmark`. + +For example, `--bridge-accept-fwmark=1` tells the daemon to accept any +packet with an `fwmark` value of `1`. Optionally, you can supply a mask +to match specific bits in the mark, `--bridge-accept-fwmark=0x1/0x3`. + +Then, instead of accepting the packet in `DOCKER-USER`, add the firewall +mark you have chosen and Docker will not drop it. + +The firewall mark must be added before Docker's rules run. So if the mark +is added in a chain with type `filter` and hook `forward`, it must have +priority `filter - 1` or lower. + +#### Replacing `DOCKER-USER` with an nftables table + +Because nftables doesn't have pre-defined chains, to replace the `DOCKER-USER` +chain you can create your own table and add chains and rules to it. + +The `DOCKER-USER` chain has type `filter` and hook `forward`, so it can +only have rules in the filter forward chain. The base chains in your +table can have any `type` or `hook`. If your rules need to run before +Docker's rules, give the base chains a lower `priority` number than +Docker's chain. Or, a higher priority to make sure they run after Docker's +rules. 
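As a rough skeleton (the table and chain names here are arbitrary), a base chain meant to run before Docker's forward chains could be declared with a priority just below the standard `filter` value:

```console
table inet my-early-rules {
    chain forward-early {
        # "filter - 1" registers this chain ahead of chains at the standard
        # "filter" priority, such as Docker's forward chains; use "filter + 1"
        # instead to run after them
        type filter hook forward priority filter - 1; policy accept;
        # add your own rules here
    }
}
```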
+
+Docker's base chains use the priority values defined at
+[priority values](https://wiki.nftables.org/wiki-nftables/index.php/Netfilter_hooks#Priority_within_hook).
+
+#### Example: restricting external connections to containers
+
+By default, any remote host can connect to ports published to the Docker
+host's external addresses.
+
+To allow only a specific IP or network to access the containers, create a
+table with a base chain that has a drop rule. For example, the
+following table drops packets from all IP addresses except `192.0.2.2`:
+
+```console
+table ip my-table {
+  chain my-filter-forward {
+    type filter hook forward priority filter; policy accept;
+    iifname "ext_if" ip saddr != 192.0.2.2 counter drop
+  }
+}
+```
+
+You will need to change `ext_if` to your host's external interface name.
+
+You could instead accept connections from a source subnet. The following
+table only accepts access from the subnet `192.0.2.0/24`:
+
+```console
+table ip my-table {
+  chain my-filter-forward {
+    type filter hook forward priority filter; policy accept;
+    iifname "ext_if" ip saddr != 192.0.2.0/24 counter drop
+  }
+}
+```
+
+If you are running other services on the host that use IP forwarding
+and need to be accessed by different external hosts, you will need more
+specific filters. For example, to match the default prefix `br-` of
+bridge devices belonging to Docker's user-defined bridge networks:
+
+```console
+table ip my-table {
+  chain my-filter-forward {
+    type filter hook forward priority filter; policy accept;
+    iifname "ext_if" oifname "br-*" ip saddr != 192.0.2.0/24 counter drop
+  }
+}
+```
+
+For more information about nftables configuration and advanced usage,
+refer to the [nftables wiki](https://wiki.nftables.org/wiki-nftables/index.php/Main_Page).
diff --git a/content/manuals/engine/network/links.md b/content/manuals/engine/network/links.md
index 0f97ccd40ab..e7133009fdb 100644
--- a/content/manuals/engine/network/links.md
+++ b/content/manuals/engine/network/links.md
@@ -44,12 +44,6 @@ Let's say you used this command to run a simple Python Flask application:
 $ docker run -d -P training/webapp python app.py
 ```
 
-> [!NOTE]
->
-> Containers have an internal network and an IP address.
-> Docker can have a variety of network configurations. You can see more
-> information on Docker networking [here](index.md).
-
 When that container was created, the `-P` flag was used to automatically map
 any network port inside it to a random high port within an *ephemeral port
 range* on your Docker host. Next, when `docker ps` was run, you saw that port
@@ -176,7 +170,7 @@ You can also use `docker inspect` to return the container's name.
 
 > [!NOTE]
 >
 > Container names must be unique. That means you can only call
-> one container `web`. If you want to re-use a container name you must delete
+> one container `web`. If you want to reuse a container name you must delete
 > the old container (with `docker container rm`) before you can create a new
 > container with the same name. As an alternative you can use the `--rm`
 > flag with the `docker run` command. This deletes the container
@@ -295,9 +289,9 @@ Docker uses this prefix format to define three distinct environment variables:
 
 * The `prefix_ADDR` variable contains the IP Address from the URL, for
   example `WEBDB_PORT_5432_TCP_ADDR=172.17.0.82`.
 
-* The `prefix_PORT` variable contains just the port number from the URL for
+* The `prefix_PORT` variable contains just the port number from the URL, for
 example `WEBDB_PORT_5432_TCP_PORT=5432`.
-* The `prefix_PROTO` variable contains just the protocol from the URL for +* The `prefix_PROTO` variable contains just the protocol from the URL of example `WEBDB_PORT_5432_TCP_PROTO=tcp`. If the container exposes multiple ports, an environment variable set is diff --git a/content/manuals/engine/network/packet-filtering-firewalls.md b/content/manuals/engine/network/packet-filtering-firewalls.md index 99f28b38df9..dfeabeedf11 100644 --- a/content/manuals/engine/network/packet-filtering-firewalls.md +++ b/content/manuals/engine/network/packet-filtering-firewalls.md @@ -8,369 +8,81 @@ aliases: - /network/packet-filtering-firewalls/ --- -On Linux, Docker creates `iptables` and `ip6tables` rules to implement network -isolation, port publishing and filtering. +On Linux, Docker creates firewall rules to implement network +isolation, [port publishing](./port-publishing.md) and filtering. Because these rules are required for the correct functioning of Docker bridge networks, you should not modify the rules created by Docker. -But, if you are running Docker on a host exposed to the internet, you will -probably want to add iptables policies that prevent unauthorized access to -containers or other services running on your host. This page describes how -to achieve that, and the caveats you need to be aware of. +This page describes options that control Docker's firewall rules to +implement functionality including port publishing, and NAT/masquerading. > [!NOTE] > -> Docker creates `iptables` rules for bridge networks. +> Docker creates firewall rules for bridge networks. > -> No `iptables` rules are created for `ipvlan`, `macvlan` or `host` networking. +> No rules are created for `ipvlan`, `macvlan` or `host` networking. -## Docker and iptables chains +## Firewall backend -In the `filter` table, Docker sets the default policy to `DROP`, and creates the -following custom `iptables` chains: +By default, Docker Engine creates its firewall rules using iptables, +see [Docker with iptables](./firewall-iptables.md). It also has +support for nftables, see [Docker with nftables](./firewall-nftables.md). -* `DOCKER-USER` - * A placeholder for user-defined rules that will be processed before rules - in the `DOCKER-FORWARD` and `DOCKER` chains. -* `DOCKER-FORWARD` - * The first stage of processing for Docker's networks. Rules that pass packets - that are not related to established connections to the other Docker chains, - as well as rules to accept packets that are part of established connections. -* `DOCKER` - * Rules that determine whether a packet that is not part of an established - connection should be accepted, based on the port forwarding configuration - of running containers. -* `DOCKER-ISOLATION-STAGE-1` and `DOCKER-ISOLATION-STAGE-2` - * Rules to isolate Docker networks from each other. -* `DOCKER-INGRESS` - * Rules related to Swarm networking. +For bridge networks, iptables and nftables have the same functionality. -In the `FORWARD` chain, Docker adds rules that unconditionally jump to the -`DOCKER-USER`, `DOCKER-FORWARD` and `DOCKER-INGRESS` chains. - -In the `nat` table, Docker creates chain `DOCKER` and adds rules to implement -masquerading and port-mapping. - -### Add iptables policies before Docker's rules - -Packets that get accepted or rejected by rules in these custom chains will not -be seen by user-defined rules appended to the `FORWARD` chain. So, to add -additional rules to filter these packets, use the `DOCKER-USER` chain. 
- -Rules appended to the `FORWARD` chain will be processed after Docker's rules. - -### Match the original IP and ports for requests - -When packets arrive to the `DOCKER-USER` chain, they have already passed through -a Destination Network Address Translation (DNAT) filter. That means that the -`iptables` flags you use can only match internal IP addresses and ports of -containers. - -If you want to match traffic based on the original IP and port in the network -request, you must use the -[`conntrack` iptables extension](https://ipset.netfilter.org/iptables-extensions.man.html#lbAO). -For example: - -```console -$ sudo iptables -I DOCKER-USER -p tcp -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT -$ sudo iptables -I DOCKER-USER -p tcp -m conntrack --ctorigdst 198.51.100.2 --ctorigdstport 80 -j ACCEPT -``` - -> [!IMPORTANT] -> -> Using the `conntrack` extension may result in degraded performance. - -## Port publishing and mapping - -By default, for both IPv4 and IPv6, the daemon blocks access to ports that have not -been published. Published container ports are mapped to host IP addresses. -To do this, it uses iptables to perform Network Address Translation (NAT), -Port Address Translation (PAT), and masquerading. - -For example, `docker run -p 8080:80 [...]` creates a mapping -between port 8080 on any address on the Docker host, and the container's -port 80. Outgoing connections from the container will masquerade, using -the Docker host's IP address. - -### Restrict external connections to containers - -By default, all external source IPs are allowed to connect to ports that have -been published to the Docker host's addresses. - -To allow only a specific IP or network to access the containers, insert a -negated rule at the top of the `DOCKER-USER` filter chain. For example, the -following rule drops packets from all IP addresses except `192.0.2.2`: - -```console -$ iptables -I DOCKER-USER -i ext_if ! -s 192.0.2.2 -j DROP -``` - -You will need to change `ext_if` to correspond with your -host's actual external interface. You could instead allow connections from a -source subnet. The following rule only allows access from the subnet `192.0.2.0/24`: - -```console -$ iptables -I DOCKER-USER -i ext_if ! -s 192.0.2.0/24 -j DROP -``` - -Finally, you can specify a range of IP addresses to accept using `--src-range` -(Remember to also add `-m iprange` when using `--src-range` or `--dst-range`): - -```console -$ iptables -I DOCKER-USER -m iprange -i ext_if ! --src-range 192.0.2.1-192.0.2.3 -j DROP -``` - -You can combine `-s` or `--src-range` with `-d` or `--dst-range` to control both -the source and destination. For instance, if the Docker host has addresses -`2001:db8:1111::2` and `2001:db8:2222::2`, you can make rules specific to -`2001:db8:1111::2` and leave `2001:db8:2222::2` open. - -You may need to allow responses from servers outside the permitted external address -ranges. For example, containers may send DNS or HTTP requests to hosts that are -not allowed to access the container's services. The following rule accepts any -incoming or outgoing packet belonging to a flow that has already been accepted -by other rules. It must be placed before `DROP` rules that restrict access from -external address ranges. - -```console -$ iptables -I DOCKER-USER -m state --state RELATED,ESTABLISHED -j ACCEPT -``` - -`iptables` is complicated. There is a lot more information at [Netfilter.org HOWTO](https://www.netfilter.org/documentation/HOWTO/NAT-HOWTO.html). 
- -### Direct routing - -Port mapping ensures that published ports are accessible on the host's -network addresses, which are likely to be routable for any external -clients. No routes are normally set up in the host's network for container -addresses that exist within a host. - -But, particularly with IPv6 you may prefer to avoid using NAT and instead -arrange for external routing to container addresses ("direct routing"). - -To access containers on a bridge network from outside the Docker host, -you must set up routing to the bridge network via an address on the Docker -host. This can be achieved using static routes, Border Gateway Protocol -(BGP), or any other means appropriate for your network. - -Within a local layer 2 network, remote hosts can set up static routes -to a container network using the Docker daemon host's address on the local -network. Those hosts can access containers directly. For remote hosts -outside the local network, direct access to containers requires router -configuration to enable the necessary routing. - -#### Gateway modes - -The bridge network driver has the following options: -- `com.docker.network.bridge.gateway_mode_ipv6` -- `com.docker.network.bridge.gateway_mode_ipv4` - -Each of these can be set to one of the gateway modes: -- `nat` -- `nat-unprotected` -- `routed` -- `isolated` - -The default is `nat`, NAT and masquerading rules are set up for each -published container port. Packets leaving the host will use a host address. - -With mode `routed`, no NAT or masquerading rules are set up, but `iptables` -are still set up so that only published container ports are accessible. -Outgoing packets from the container will use the container's address, -not a host address. - -In `nat` mode, when a port is published to a specific host address, that -port is only accessible via the host interface with that address. So, -for example, publishing a port to an address on the loopback interface -means remote hosts cannot access it. - -However, using direct routing, published container ports are always -accessible from remote hosts, unless the Docker host's firewall has -additional restrictions. Hosts on the local layer-2 network can set up -direct routing without needing any additional network configuration. -Hosts outside the local network can only use direct routing to the -container if the network's routers are configured to enable it. - -In `nat-unprotected` mode, unpublished container ports are also -accessible using direct routing, no port filtering rules are set up. -This mode is included for compatibility with legacy default behaviour. - -The gateway mode also affects communication between containers that -are connected to different Docker networks on the same host. -- In `nat` and `nat-unprotected` modes, containers in other bridge - networks can only access published ports via the host addresses they - are published to. Direct routing from other networks is not allowed. -- In `routed` mode containers in other networks can use direct - routing to access ports, without going via a host address. - -In `routed` mode, a host port in a `-p` or `--publish` port mapping is -not used, and the host address is only used to decide whether to apply -the mapping to IPv4 or IPv6. So, when a mapping only applies to `routed` -mode, only addresses `0.0.0.0` or `::` should be used, and a host port -should not be given. If a specific address or port is given, it will -have no effect on the published port and a warning message will be -logged. 
- -Mode `isolated` can only be used when the network is also created with -CLI flag `--internal`, or equivalent. An address is normally assigned to the -bridge device in an `internal` network. So, processes on the docker host can -access the network, and containers in the network can access host services -listening on that bridge address (including services listening on "any" host -address, `0.0.0.0` or `::`). No address is assigned to the bridge when the -network is created with gateway mode `isolated`. - -#### Example - -Create a network suitable for direct routing for IPv6, with NAT enabled -for IPv4: -```console -$ docker network create --ipv6 --subnet 2001:db8::/64 -o com.docker.network.bridge.gateway_mode_ipv6=routed mynet -``` - -Create a container with a published port: -```console -$ docker run --network=mynet -p 8080:80 myimage -``` - -Then: -- Only container port 80 will be open, for IPv4 and IPv6. -- For IPv6, using `routed` mode, port 80 will be open on the container's IP - address. Port 8080 will not be opened on the host's IP addresses, and - outgoing packets will use the container's IP address. -- For IPv4, using the default `nat` mode, the container's port 80 will be - accessible via port 8080 on the host's IP addresses, as well as directly - from within the Docker host. But, container port 80 cannot be accessed - directly from outside the host. - Connections originating from the container will masquerade, using the - host's IP address. - -In `docker inspect`, this port mapping will be shown as follows. Note that -there is no `HostPort` for IPv6, because it is using `routed` mode: -```console -$ docker container inspect --format "{{json .NetworkSettings.Ports}}" -{"80/tcp":[{"HostIp":"0.0.0.0","HostPort":"8080"},{"HostIp":"::","HostPort":""}]} -``` - -Alternatively, to make the mapping IPv6-only, disabling IPv4 access to the -container's port 80, use the unspecified IPv6 address `[::]` and do not -include a host port number: -```console -$ docker run --network mynet -p '[::]::80' -``` - -### Setting the default bind address for containers - -By default, when a container's ports are mapped without any specific host -address, the Docker daemon binds published container ports to all host -addresses (`0.0.0.0` and `[::]`). - -For example, the following command publishes port 8080 to all network -interfaces on the host, on both IPv4 and IPv6 addresses, potentially -making them available to the outside world. - -```console -docker run -p 8080:80 nginx -``` - -You can change the default binding address for published container ports so that -they're only accessible to the Docker host by default. To do that, you can -configure the daemon to use the loopback address (`127.0.0.1`) instead. - -> [!WARNING] -> -> In releases older than 28.0.0, hosts within the same L2 segment (for example, -> hosts connected to the same network switch) can reach ports published to -> localhost. For more information, see -> [moby/moby#45610](https://github.com/moby/moby/issues/45610) - -To configure this setting for user-defined bridge networks, use -the `com.docker.network.bridge.host_binding_ipv4` -[driver option](./drivers/bridge.md#options) when you create the network. - -```console -$ docker network create mybridge \ - -o "com.docker.network.bridge.host_binding_ipv4=127.0.0.1" -``` - -> [!NOTE] -> -> - Setting the default binding address to `::` means port bindings with no host -> address specified will work for any IPv6 address on the host. But, `0.0.0.0` -> means any IPv4 or IPv6 address. 
-> - Changing the default bind address doesn't have any effect on Swarm services. -> Swarm services are always exposed on the `0.0.0.0` network interface. - -#### Default bridge - -To set the default binding for the default bridge network, configure the `"ip"` -key in the `daemon.json` configuration file: - -```json -{ - "ip": "127.0.0.1" -} -``` - -This changes the default binding address to `127.0.0.1` for published container -ports on the default bridge network. -Restart the daemon for this change to take effect. -Alternatively, you can use the `dockerd --ip` flag when starting the daemon. +Docker Engine option `firewall-backend` can be used to select whether +iptables or nftables is used. See +[daemon configuration](https://docs.docker.com/reference/cli/dockerd/). ## Docker on a router On Linux, Docker needs "IP Forwarding" enabled on the host. So, it enables the `sysctl` settings `net.ipv4.ip_forward` and `net.ipv6.conf.all.forwarding` -it they are not already enabled when it starts. When it does that, it also -sets the policy of the iptables `FORWARD` chain to `DROP`. +if they are not already enabled when it starts. When it does that, it also +configures the firewall to drop forwarded packets unless they are explicitly +accepted. -If Docker sets the policy for the `FORWARD` chain to `DROP`. This will prevent -your Docker host from acting as a router, it is the recommended setting when -IP Forwarding is enabled. +When Docker sets the default forwarding policy to "drop", it will prevent +your Docker host from acting as a router. This is the recommended setting when +IP Forwarding is enabled, unless router functionality is required. -To stop Docker from setting the `FORWARD` chain's policy to `DROP`, include +To stop Docker from setting the forwarding policy to "drop", include `"ip-forward-no-drop": true` in `/etc/docker/daemon.json`, or add option `--ip-forward-no-drop` to the `dockerd` command line. -Alternatively, you may add `ACCEPT` rules to the `DOCKER-USER` chain for the -packets you want to forward. For example: - -```console -$ iptables -I DOCKER-USER -i src_if -o dst_if -j ACCEPT -``` - -> [!WARNING] -> -> In releases older than 28.0.0, Docker always set the default policy of the -> IPv6 `FORWARD` chain to `DROP`. In release 28.0.0 and newer, it will only -> set that policy if it enables IPv6 forwarding itself. This has always been -> the behaviour for IPv4 forwarding. +> [!NOTE] > -> If IPv6 forwarding is enabled on your host before Docker starts, check your -> host's configuration to make sure it is still secure. +> With the experimental nftables backend, Docker does not enable IP forwarding +> itself, and it will not create a default "drop" nftables policy. See +> [Migrating from iptables to nftables](./firewall-nftables.md#migrating-from-iptables-to-nftables). -## Prevent Docker from manipulating iptables +## Prevent Docker from manipulating firewall rules -It is possible to set the `iptables` or `ip6tables` keys to `false` in -[daemon configuration](https://docs.docker.com/reference/cli/dockerd/), but -this option is not appropriate for most users. It is likely to break +Setting the `iptables` or `ip6tables` keys to `false` in +[daemon configuration](https://docs.docker.com/reference/cli/dockerd/), will +prevent Docker from creating most of its `iptables` or `nftables` rules. But, +this option is not appropriate for most users, it is likely to break container networking for the Docker Engine. 
-All ports of all containers will be accessible from the network, and none -will be mapped from Docker host IP addresses. +For example, with Docker's firewalling disabled and no replacement +rules, containers in bridge networks will not be able to access +internet hosts by masquerading, but all of their ports will be accessible +to hosts on the local network. -It is not possible to completely prevent Docker from creating `iptables` +It is not possible to completely prevent Docker from creating firewall rules, and creating rules after-the-fact is extremely involved and beyond the scope of these instructions. ## Integration with firewalld -If you are running Docker with the `iptables` option set to `true`, and -[firewalld](https://firewalld.org) is enabled on your system, Docker -automatically creates a `firewalld` zone called `docker`, with target `ACCEPT`. +If you are running Docker with the `iptables` or `ip6tables` options set to +`true`, and [firewalld](https://firewalld.org) is enabled on your system, in +addition to its usual iptables or nftables rules, Docker creates a `firewalld` +zone called `docker`, with target `ACCEPT`. -All network interfaces created by Docker (for example, `docker0`) are inserted -into the `docker` zone. +All bridge network interfaces created by Docker (for example, `docker0`) are +inserted into the `docker` zone. Docker also creates a forwarding policy called `docker-forwarding` that allows forwarding from `ANY` zone to the `docker` zone. @@ -379,8 +91,8 @@ forwarding from `ANY` zone to the `docker` zone. [Uncomplicated Firewall](https://launchpad.net/ufw) (ufw) is a frontend that ships with Debian and Ubuntu, -and it lets you manage firewall rules. Docker and ufw use iptables in ways -that make them incompatible with each other. +and it lets you manage firewall rules. Docker and ufw use firewall rules in +ways that make them incompatible with each other. When you publish a container's ports using Docker, traffic to and from that container gets diverted before it goes through the ufw firewall settings. diff --git a/content/manuals/engine/network/port-publishing.md b/content/manuals/engine/network/port-publishing.md new file mode 100644 index 00000000000..c51d7d2b3ac --- /dev/null +++ b/content/manuals/engine/network/port-publishing.md @@ -0,0 +1,328 @@ +--- +title: Port publishing and mapping +weight: 10 +description: Accessing container ports +keywords: network, iptables, firewall +--- + +By default, for both IPv4 and IPv6, the Docker daemon blocks access to ports that +have not been published. Published container ports are mapped to host IP addresses. +To do this, it uses firewall rules to perform Network Address Translation (NAT), +Port Address Translation (PAT), and masquerading. + +For example, `docker run -p 8080:80 [...]` creates a mapping +between port 8080 on any address on the Docker host, and the container's +port 80. Outgoing connections from the container will masquerade, using +the Docker host's IP address. + +## Publishing ports + +When you create or run a container using `docker create` or `docker run`, all +ports of containers on bridge networks are accessible from the Docker host and +other containers connected to the same network. Ports are not accessible from +outside the host or, with the default configuration, from containers in other +networks. + +Use the `--publish` or `-p` flag to make a port available outside the host, +and to containers in other bridge networks. 
+
+This creates a firewall rule on the host that maps a container port to a
+port on the Docker host, making the port available to the outside world.
+Here are some examples:
+
+| Flag value | Description |
+| ------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `-p 8080:80` | Map port `8080` on the Docker host to TCP port `80` in the container. |
+| `-p 192.168.1.100:8080:80` | Map port `8080` on the Docker host IP `192.168.1.100` to TCP port `80` in the container. |
+| `-p 8080:80/udp` | Map port `8080` on the Docker host to UDP port `80` in the container. |
+| `-p 8080:80/tcp -p 8080:80/udp` | Map TCP port `8080` on the Docker host to TCP port `80` in the container, and map UDP port `8080` on the Docker host to UDP port `80` in the container. |
+
+> [!IMPORTANT]
+>
+> Publishing container ports is insecure by default. When you publish a
+> container's ports, they become available not only to the Docker host, but
+> to the outside world as well.
+>
+> If you include the localhost IP address (`127.0.0.1`, or `::1`) with the
+> publish flag, only the Docker host can access the published container port.
+>
+> ```console
+> $ docker run -p 127.0.0.1:8080:80 -p '[::1]:8080:80' nginx
+> ```
+>
+> > [!WARNING]
+> >
+> > In releases older than 28.0.0, hosts within the same L2 segment (for example,
+> > hosts connected to the same network switch) can reach ports published to localhost.
+> > For more information, see
+> > [moby/moby#45610](https://github.com/moby/moby/issues/45610)
+
+Ports on the host's IPv6 addresses will map to the container's IPv4 address
+if no host IP is given in a port mapping, the bridge network is IPv4-only,
+and `--userland-proxy=true` (default).
+
+## Direct routing
+
+Port mapping ensures that published ports are accessible on the host's
+network addresses, which are likely to be routable for any external
+clients. No routes are normally set up in the host's network for container
+addresses that exist within a host.
+
+But, particularly with IPv6, you may prefer to avoid using NAT and instead
+arrange for external routing to container addresses ("direct routing").
+
+To access containers on a bridge network from outside the Docker host,
+you must first set up routing to the bridge network via an address on the
+Docker host. This can be achieved using static routes, Border Gateway Protocol (BGP),
+or any other means appropriate for your network. For example, within
+a local layer 2 network, remote hosts can set up static routes to a container
+network via the Docker daemon host's address on the local network.
+
+### Direct routing to containers in bridge networks
+
+By default, remote hosts are not allowed direct access to container IP
+addresses in Docker's Linux bridge networks. They can only access ports
+published to host IP addresses.
+
+To allow direct access to any published port, on any container, in any
+Linux bridge network, use the daemon option `"allow-direct-routing": true`
+in `/etc/docker/daemon.json` or the equivalent `--allow-direct-routing`.
+
+To allow direct routing from anywhere to containers in a specific bridge
+network, see [Gateway modes](#gateway-modes).
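+
+For example, a minimal sketch of the daemon-wide `allow-direct-routing`
+setting in `/etc/docker/daemon.json` (merge it with any options you already
+set, and restart the daemon for the change to take effect):
+
+```json
+{
+  "allow-direct-routing": true
+}
+```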
+ +Or, to allow direct routing via specific host interfaces, to a specific +bridge network, use the following option when creating the network: +- `com.docker.network.bridge.trusted_host_interfaces` + +#### Example + +Create a network where published ports on container IP addresses can be +accessed directly from interfaces `vxlan.1` and `eth3`: + +```console +$ docker network create --subnet 192.0.2.0/24 --ip-range 192.0.2.0/29 -o com.docker.network.bridge.trusted_host_interfaces="vxlan.1:eth3" mynet +``` + +Run a container in that network, publishing its port 80 to port 8080 on +the host's loopback interface: + +```console +$ docker run -d --ip 192.0.2.100 -p 127.0.0.1:8080:80 nginx +``` + +The web server running on the container's port 80 can now be accessed +from the Docker host at `http://127.0.0.1:8080`, or directly at +`http://192.0.2.100:80`. If remote hosts on networks connected to +interfaces `vxlan.1` and `eth3` have a route to the `192.0.2.0/24` +network inside the Docker host, they can also access the web server +via `http://192.0.2.100:80`. + +## Gateway modes + +The bridge network driver has the following options: +- `com.docker.network.bridge.gateway_mode_ipv6` +- `com.docker.network.bridge.gateway_mode_ipv4` + +Each of these can be set to one of the gateway modes: +- `nat` +- `nat-unprotected` +- `routed` +- `isolated` + +The default is `nat`, NAT and masquerading rules are set up for each +published container port. Packets leaving the host will use a host address. + +With mode `routed`, no NAT or masquerading rules are set up, but firewall +rules are still set up so that only published container ports are accessible. +Outgoing packets from the container will use the container's address, +not a host address. + +To access a published port in a `routed` network, remote hosts must have +a route to the container network via an external address on the Docker +host ("direct routing"). Hosts on the local layer-2 network can set up +direct routing without needing any additional network configuration. +Hosts outside the local network can only use direct routing to the +container if the network's routers are configured to enable it. + +In a `nat` mode network, publishing a port to an address on the loopback +interface means remote hosts cannot access it. Other published container +ports in `routed` and `nat` networks are always accessible from remote +hosts using direct routing, unless the Docker host's firewall has additional +restrictions. + +> [!NOTE] +> +> When a port is published to a specific host address in `nat` mode, if +> IP forwarding is enabled on the Docker host, the published port can be +> accessed via other host interfaces using direct routing to the host +> address. +> +> For example, a Docker host with IP forwarding enabled has two NICs with +> addresses `192.168.100.10/24` and `10.0.0.10/24`. +> When a port is published to `192.168.100.10`, a host in the `10.0.0.0/24` +> subnet can access that port by routing to `192.168.100.10` via `10.0.0.10`. + +In `nat-unprotected` mode, unpublished container ports are also +accessible using direct routing, no port filtering rules are set up. +This mode is included for compatibility with legacy default behaviour. + +The gateway mode also affects communication between containers that +are connected to different Docker networks on the same host. +- In `nat` and `nat-unprotected` modes, containers in other bridge + networks can only access published ports via the host addresses they + are published to. 
Direct routing from other networks is not allowed. +- In `routed` mode containers in other networks can use direct + routing to access ports, without going via a host address. + +In `routed` mode, a host port in a `-p` or `--publish` port mapping is +not used, and the host address is only used to decide whether to apply +the mapping to IPv4 or IPv6. So, when a mapping only applies to `routed` +mode, only addresses `0.0.0.0` or `::` should be used, and a host port +should not be given. If a specific address or port is given, it will +have no effect on the published port and a warning message will be +logged. + +Mode `isolated` can only be used when the network is also created with +CLI flag `--internal`, or equivalent. An address is normally assigned to the +bridge device in an `internal` network. So, processes on the Docker host can +access the network, and containers in the network can access host services +listening on that bridge address (including services listening on "any" host +address, `0.0.0.0` or `::`). No address is assigned to the bridge when the +network is created with gateway mode `isolated`. + +### Example + +Create a network suitable for direct routing for IPv6, with NAT enabled +for IPv4: +```console +$ docker network create --ipv6 --subnet 2001:db8::/64 -o com.docker.network.bridge.gateway_mode_ipv6=routed mynet +``` + +Create a container with a published port: +```console +$ docker run --network=mynet -p 8080:80 myimage +``` + +Then: +- Only container port 80 will be open, for IPv4 and IPv6. +- For IPv6, using `routed` mode, port 80 will be open on the container's IP + address. Port 8080 will not be opened on the host's IP addresses, and + outgoing packets will use the container's IP address. +- For IPv4, using the default `nat` mode, the container's port 80 will be + accessible via port 8080 on the host's IP addresses, as well as directly + from within the Docker host. But, container port 80 cannot be accessed + directly from outside the host. + Connections originating from the container will masquerade, using the + host's IP address. + +In `docker inspect`, this port mapping will be shown as follows. Note that +there is no `HostPort` for IPv6, because it is using `routed` mode: +```console +$ docker container inspect --format "{{json .NetworkSettings.Ports}}" +{"80/tcp":[{"HostIp":"0.0.0.0","HostPort":"8080"},{"HostIp":"::","HostPort":""}]} +``` + +Alternatively, to make the mapping IPv6-only, disabling IPv4 access to the +container's port 80, use the unspecified IPv6 address `[::]` and do not +include a host port number: +```console +$ docker run --network mynet -p '[::]::80' +``` + +## Setting the default bind address for containers + +By default, when a container's ports are mapped without any specific host +address, the Docker daemon publishes ports to all host addresses +(`0.0.0.0` and `[::]`). + +For example, the following command publishes port 8080 to all network +interfaces on the host, on both IPv4 and IPv6 addresses, potentially +making them available to the outside world. + +```console +docker run -p 8080:80 nginx +``` + +You can change the default binding address for published container ports so that +they're only accessible to the Docker host by default. To do that, you can +configure the daemon to use the loopback address (`127.0.0.1`) instead. + +> [!WARNING] +> +> In releases older than 28.0.0, hosts within the same L2 segment (for example, +> hosts connected to the same network switch) can reach ports published to +> localhost. 
For more information, see +> [moby/moby#45610](https://github.com/moby/moby/issues/45610) + +To configure this setting for user-defined bridge networks, use +the `com.docker.network.bridge.host_binding_ipv4` +[driver option](./drivers/bridge.md#default-host-binding-address) when you +create the network. Despite the option name, it is possible to specify an +IPv6 address. + +```console +$ docker network create mybridge \ + -o "com.docker.network.bridge.host_binding_ipv4=127.0.0.1" +``` + +Or, to set the default binding address for containers in all user-defined +bridge networks, use daemon configuration option `default-network-opts`. +For example: + +```json +{ + "default-network-opts": { + "bridge": { + "com.docker.network.bridge.host_binding_ipv4": "127.0.0.1" + } + } +} +``` + +> [!NOTE] +> +> Setting the default binding address to `::` means port bindings with no host +> address specified will work for any IPv6 address on the host. But, `0.0.0.0` +> means any IPv4 or IPv6 address. +> +> Changing the default bind address doesn't have any effect on Swarm services. +> Swarm services are always exposed on the `0.0.0.0` network interface. + +### Masquerade or SNAT for outgoing packets + +NAT is enabled by default for bridge networks, meaning outgoing packets +from containers are masqueraded. The source address of packets leaving +the Docker host is changed to an address on the host interface the packet +is sent on. + +Masquerading can be disabled for a user-defined bridge network by using +the `com.docker.network.bridge.enable_ip_masquerade` driver option when +creating the network. For example: +```console +$ docker network create mybridge \ + -o com.docker.network.bridge.enable_ip_masquerade=false ... +``` + +To use a specific source address for outgoing packets for a user-defined +network, instead of letting masquerading select an address, use options +`com.docker.network.host_ipv4` and `com.docker.network.host_ipv6` to +specify the Source NAT (SNAT) address to use. The +`com.docker.network.bridge.enable_ip_masquerade` option must +be `true`, the default, for these options to have any effect. + +### Default bridge + +To set the default binding for the default bridge network, configure the `"ip"` +key in the `daemon.json` configuration file: + +```json +{ + "ip": "127.0.0.1" +} +``` + +This changes the default binding address to `127.0.0.1` for published container +ports on the default bridge network. +Restart the daemon for this change to take effect. +Alternatively, you can use the `dockerd --ip` flag when starting the daemon. diff --git a/content/manuals/engine/network/tutorials/_index.md b/content/manuals/engine/network/tutorials/_index.md deleted file mode 100644 index 9b3fccaf423..00000000000 --- a/content/manuals/engine/network/tutorials/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -build: - render: never -title: Tutorials -weight: 30 ---- diff --git a/content/manuals/engine/network/tutorials/host.md b/content/manuals/engine/network/tutorials/host.md deleted file mode 100644 index 61a46b63cf6..00000000000 --- a/content/manuals/engine/network/tutorials/host.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Networking using the host network -description: Tutorials for networking using the host network, disabling network isolation -keywords: networking, host, standalone -aliases: - - /network/network-tutorial-host/ ---- - -This series of tutorials deals with networking standalone containers which bind -directly to the Docker host's network, with no network isolation. 
For other -networking topics, see the [overview](/manuals/engine/network/_index.md). - -## Goal - -The goal of this tutorial is to start a `nginx` container which binds directly -to port 80 on the Docker host. From a networking point of view, this is the -same level of isolation as if the `nginx` process were running directly on the -Docker host and not in a container. However, in all other ways, such as storage, -process namespace, and user namespace, the `nginx` process is isolated from the -host. - -## Prerequisites - -- This procedure requires port 80 to be available on the Docker host. To make - Nginx listen on a different port, see the - [documentation for the `nginx` image](https://hub.docker.com/_/nginx/) - -- The `host` networking driver only works on Linux hosts, and as an opt-in - feature in Docker Desktop version 4.34 and later. To enable this feature in - Docker Desktop, navigate to the **Resources** tab in **Settings**, and then - under **Network** select **Enable host networking**. - -## Procedure - -1. Create and start the container as a detached process. The `--rm` option means to remove the container once it exits/stops. The `-d` flag means to start the container detached (in the background). - - ```console - $ docker run --rm -d --network host --name my_nginx nginx - ``` - -2. Access Nginx by browsing to - [http://localhost:80/](http://localhost:80/). - -3. Examine your network stack using the following commands: - - - Examine all network interfaces and verify that a new one was not created. - - ```console - $ ip addr show - ``` - - - Verify which process is bound to port 80, using the `netstat` command. You - need to use `sudo` because the process is owned by the Docker daemon user - and you otherwise won't be able to see its name or PID. - - ```console - $ sudo netstat -tulpn | grep :80 - ``` - -4. Stop the container. It will be removed automatically as it was started using the `--rm` option. - - ```console - docker container stop my_nginx - ``` - -## Other networking tutorials - -- [Standalone networking tutorial](/manuals/engine/network/tutorials/standalone.md) -- [Overlay networking tutorial](/manuals/engine/network/tutorials/overlay.md) -- [Macvlan networking tutorial](/manuals/engine/network/tutorials/macvlan.md) diff --git a/content/manuals/engine/network/tutorials/macvlan.md b/content/manuals/engine/network/tutorials/macvlan.md deleted file mode 100644 index 12f9719c9c2..00000000000 --- a/content/manuals/engine/network/tutorials/macvlan.md +++ /dev/null @@ -1,225 +0,0 @@ ---- -title: Networking using a macvlan network -description: Tutorials for networking using a macvlan bridge network and 802.1Q trunk - bridge network -keywords: networking, macvlan, 802.1Q, standalone -aliases: - - /network/network-tutorial-macvlan/ ---- - -This series of tutorials deals with networking standalone containers which -connect to `macvlan` networks. In this type of network, the Docker host accepts -requests for multiple MAC addresses at its IP address, and routes those requests -to the appropriate container. For other networking topics, see the -[overview](/manuals/engine/network/_index.md). - -## Goal - -The goal of these tutorials is to set up a bridged `macvlan` network and attach -a container to it, then set up an 802.1Q trunked `macvlan` network and attach a -container to it. - -## Prerequisites - -- Most cloud providers block `macvlan` networking. You may need physical access - to your networking equipment. 
- -- The `macvlan` networking driver only works on Linux hosts, and is not supported - on Docker Desktop or Docker Engine on Windows. - -- You need at least version 3.9 of the Linux kernel, and version 4.0 or higher - is recommended. - -- The examples assume your ethernet interface is `eth0`. If your device has a - different name, use that instead. - -- The `macvlan` driver is not supported in rootless mode. - -## Bridge example - -In the simple bridge example, your traffic flows through `eth0` and Docker -routes traffic to your container using its MAC address. To network devices -on your network, your container appears to be physically attached to the network. - -1. Create a `macvlan` network called `my-macvlan-net`. Modify the `subnet`, `gateway`, - and `parent` values to values that make sense in your environment. - - ```console - $ docker network create -d macvlan \ - --subnet=172.16.86.0/24 \ - --gateway=172.16.86.1 \ - -o parent=eth0 \ - my-macvlan-net - ``` - - You can use `docker network ls` and `docker network inspect my-macvlan-net` - commands to verify that the network exists and is a `macvlan` network. - -2. Start an `alpine` container and attach it to the `my-macvlan-net` network. The - `-dit` flags start the container in the background but allow you to attach - to it. The `--rm` flag means the container is removed when it is stopped. - - ```console - $ docker run --rm -dit \ - --network my-macvlan-net \ - --name my-macvlan-alpine \ - alpine:latest \ - ash - ``` - -3. Inspect the `my-macvlan-alpine` container and notice the `MacAddress` key - within the `Networks` key: - - ```console - $ docker container inspect my-macvlan-alpine - - ...truncated... - "Networks": { - "my-macvlan-net": { - "IPAMConfig": null, - "Links": null, - "Aliases": [ - "bec64291cd4c" - ], - "NetworkID": "5e3ec79625d388dbcc03dcf4a6dc4548644eb99d58864cf8eee2252dcfc0cc9f", - "EndpointID": "8caf93c862b22f379b60515975acf96f7b54b7cf0ba0fb4a33cf18ae9e5c1d89", - "Gateway": "172.16.86.1", - "IPAddress": "172.16.86.2", - "IPPrefixLen": 24, - "IPv6Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "MacAddress": "02:42:ac:10:56:02", - "DriverOpts": null - } - } - ...truncated - ``` - -4. Check out how the container sees its own network interfaces by running a - couple of `docker exec` commands. - - ```console - $ docker exec my-macvlan-alpine ip addr show eth0 - - 9: eth0@tunl0: mtu 1500 qdisc noqueue state UP - link/ether 02:42:ac:10:56:02 brd ff:ff:ff:ff:ff:ff - inet 172.16.86.2/24 brd 172.16.86.255 scope global eth0 - valid_lft forever preferred_lft forever - ``` - - ```console - $ docker exec my-macvlan-alpine ip route - - default via 172.16.86.1 dev eth0 - 172.16.86.0/24 dev eth0 scope link src 172.16.86.2 - ``` - -5. Stop the container (Docker removes it because of the `--rm` flag), and remove - the network. - - ```console - $ docker container stop my-macvlan-alpine - - $ docker network rm my-macvlan-net - ``` - -## 802.1Q trunked bridge example - -In the 802.1Q trunked bridge example, your traffic flows through a sub-interface -of `eth0` (called `eth0.10`) and Docker routes traffic to your container using -its MAC address. To network devices on your network, your container appears to -be physically attached to the network. - -1. Create a `macvlan` network called `my-8021q-macvlan-net`. Modify the - `subnet`, `gateway`, and `parent` values to values that make sense in your - environment. 
- - ```console - $ docker network create -d macvlan \ - --subnet=172.16.86.0/24 \ - --gateway=172.16.86.1 \ - -o parent=eth0.10 \ - my-8021q-macvlan-net - ``` - - You can use `docker network ls` and `docker network inspect my-8021q-macvlan-net` - commands to verify that the network exists, is a `macvlan` network, and - has parent `eth0.10`. You can use `ip addr show` on the Docker host to - verify that the interface `eth0.10` exists and has a separate IP address - -2. Start an `alpine` container and attach it to the `my-8021q-macvlan-net` - network. The `-dit` flags start the container in the background but allow - you to attach to it. The `--rm` flag means the container is removed when it - is stopped. - - ```console - $ docker run --rm -itd \ - --network my-8021q-macvlan-net \ - --name my-second-macvlan-alpine \ - alpine:latest \ - ash - ``` - -3. Inspect the `my-second-macvlan-alpine` container and notice the `MacAddress` - key within the `Networks` key: - - ```console - $ docker container inspect my-second-macvlan-alpine - - ...truncated... - "Networks": { - "my-8021q-macvlan-net": { - "IPAMConfig": null, - "Links": null, - "Aliases": [ - "12f5c3c9ba5c" - ], - "NetworkID": "c6203997842e654dd5086abb1133b7e6df627784fec063afcbee5893b2bb64db", - "EndpointID": "aa08d9aa2353c68e8d2ae0bf0e11ed426ea31ed0dd71c868d22ed0dcf9fc8ae6", - "Gateway": "172.16.86.1", - "IPAddress": "172.16.86.2", - "IPPrefixLen": 24, - "IPv6Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "MacAddress": "02:42:ac:10:56:02", - "DriverOpts": null - } - } - ...truncated - ``` - -4. Check out how the container sees its own network interfaces by running a - couple of `docker exec` commands. - - ```console - $ docker exec my-second-macvlan-alpine ip addr show eth0 - - 11: eth0@if10: mtu 1500 qdisc noqueue state UP - link/ether 02:42:ac:10:56:02 brd ff:ff:ff:ff:ff:ff - inet 172.16.86.2/24 brd 172.16.86.255 scope global eth0 - valid_lft forever preferred_lft forever - ``` - - ```console - $ docker exec my-second-macvlan-alpine ip route - - default via 172.16.86.1 dev eth0 - 172.16.86.0/24 dev eth0 scope link src 172.16.86.2 - ``` - -5. Stop the container (Docker removes it because of the `--rm` flag), and remove - the network. - - ```console - $ docker container stop my-second-macvlan-alpine - - $ docker network rm my-8021q-macvlan-net - ``` - -## Other networking tutorials - -- [Standalone networking tutorial](/manuals/engine/network/tutorials/standalone.md) -- [Overlay networking tutorial](/manuals/engine/network/tutorials/overlay.md) -- [Host networking tutorial](/manuals/engine/network/tutorials/host.md) diff --git a/content/manuals/engine/network/tutorials/overlay.md b/content/manuals/engine/network/tutorials/overlay.md deleted file mode 100644 index 0fbcc7d2ae8..00000000000 --- a/content/manuals/engine/network/tutorials/overlay.md +++ /dev/null @@ -1,442 +0,0 @@ ---- -title: Networking with overlay networks -description: Tutorials for networking with swarm services and standalone containers - on multiple Docker daemons -keywords: networking, bridge, routing, ports, swarm, overlay -aliases: -- /engine/userguide/networking/get-started-overlay/ -- /network/network-tutorial-overlay/ ---- - -This series of tutorials deals with networking for swarm services. -For networking with standalone containers, see -[Networking with standalone containers](/manuals/engine/network/tutorials/standalone.md). 
If you need to -learn more about Docker networking in general, see the [overview](/manuals/engine/network/_index.md). - -This page includes the following tutorials. You can run each of them on -Linux, Windows, or a Mac, but for the last one, you need a second Docker -host running elsewhere. - -- [Use the default overlay network](#use-the-default-overlay-network) demonstrates - how to use the default overlay network that Docker sets up for you - automatically when you initialize or join a swarm. This network is not the - best choice for production systems. - -- [Use user-defined overlay networks](#use-a-user-defined-overlay-network) shows - how to create and use your own custom overlay networks, to connect services. - This is recommended for services running in production. - -- [Use an overlay network for standalone containers](#use-an-overlay-network-for-standalone-containers) - shows how to communicate between standalone containers on different Docker - daemons using an overlay network. - -## Prerequisites - -These require you to have at least a single-node swarm, which means that -you have started Docker and run `docker swarm init` on the host. You can run -the examples on a multi-node swarm as well. - -## Use the default overlay network - -In this example, you start an `alpine` service and examine the characteristics -of the network from the point of view of the individual service containers. - -This tutorial does not go into operation-system-specific details about how -overlay networks are implemented, but focuses on how the overlay functions from -the point of view of a service. - -### Prerequisites - -This tutorial requires three physical or virtual Docker hosts which can all -communicate with one another. This tutorial assumes that the three hosts are -running on the same network with no firewall involved. - -These hosts will be referred to as `manager`, `worker-1`, and `worker-2`. The -`manager` host will function as both a manager and a worker, which means it can -both run service tasks and manage the swarm. `worker-1` and `worker-2` will -function as workers only, - -If you don't have three hosts handy, an easy solution is to set up three -Ubuntu hosts on a cloud provider such as Amazon EC2, all on the same network -with all communications allowed to all hosts on that network (using a mechanism -such as EC2 security groups), and then to follow the -[installation instructions for Docker Engine - Community on Ubuntu](/manuals/engine/install/ubuntu.md). - -### Walkthrough - -#### Create the swarm - -At the end of this procedure, all three Docker hosts will be joined to the swarm -and will be connected together using an overlay network called `ingress`. - -1. On `manager`. initialize the swarm. If the host only has one network - interface, the `--advertise-addr` flag is optional. - - ```console - $ docker swarm init --advertise-addr= - ``` - - Make a note of the text that is printed, as this contains the token that - you will use to join `worker-1` and `worker-2` to the swarm. It is a good - idea to store the token in a password manager. - -2. On `worker-1`, join the swarm. If the host only has one network interface, - the `--advertise-addr` flag is optional. - - ```console - $ docker swarm join --token \ - --advertise-addr \ - :2377 - ``` - -3. On `worker-2`, join the swarm. If the host only has one network interface, - the `--advertise-addr` flag is optional. - - ```console - $ docker swarm join --token \ - --advertise-addr \ - :2377 - ``` - -4. On `manager`, list all the nodes. 
This command can only be done from a - manager. - - ```console - $ docker node ls - - ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS - d68ace5iraw6whp7llvgjpu48 * ip-172-31-34-146 Ready Active Leader - nvp5rwavvb8lhdggo8fcf7plg ip-172-31-35-151 Ready Active - ouvx2l7qfcxisoyms8mtkgahw ip-172-31-36-89 Ready Active - ``` - - You can also use the `--filter` flag to filter by role: - - ```console - $ docker node ls --filter role=manager - - ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS - d68ace5iraw6whp7llvgjpu48 * ip-172-31-34-146 Ready Active Leader - - $ docker node ls --filter role=worker - - ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS - nvp5rwavvb8lhdggo8fcf7plg ip-172-31-35-151 Ready Active - ouvx2l7qfcxisoyms8mtkgahw ip-172-31-36-89 Ready Active - ``` - -5. List the Docker networks on `manager`, `worker-1`, and `worker-2` and notice - that each of them now has an overlay network called `ingress` and a bridge - network called `docker_gwbridge`. Only the listing for `manager` is shown - here: - - ```console - $ docker network ls - - NETWORK ID NAME DRIVER SCOPE - 495c570066be bridge bridge local - 961c6cae9945 docker_gwbridge bridge local - ff35ceda3643 host host local - trtnl4tqnc3n ingress overlay swarm - c8357deec9cb none null local - ``` - -The `docker_gwbridge` connects the `ingress` network to the Docker host's -network interface so that traffic can flow to and from swarm managers and -workers. If you create swarm services and do not specify a network, they are -connected to the `ingress` network. It is recommended that you use separate -overlay networks for each application or group of applications which will work -together. In the next procedure, you will create two overlay networks and -connect a service to each of them. - -#### Create the services - -1. On `manager`, create a new overlay network called `nginx-net`: - - ```console - $ docker network create -d overlay nginx-net - ``` - - You don't need to create the overlay network on the other nodes, because it - will be automatically created when one of those nodes starts running a - service task which requires it. - -2. On `manager`, create a 5-replica Nginx service connected to `nginx-net`. The - service will publish port 80 to the outside world. All of the service - task containers can communicate with each other without opening any ports. - - > [!NOTE] - > - > Services can only be created on a manager. - - ```console - $ docker service create \ - --name my-nginx \ - --publish target=80,published=80 \ - --replicas=5 \ - --network nginx-net \ - nginx - ``` - - The default publish mode of `ingress`, which is used when you do not - specify a `mode` for the `--publish` flag, means that if you browse to - port 80 on `manager`, `worker-1`, or `worker-2`, you will be connected to - port 80 on one of the 5 service tasks, even if no tasks are currently - running on the node you browse to. If you want to publish the port using - `host` mode, you can add `mode=host` to the `--publish` output. However, - you should also use `--mode global` instead of `--replicas=5` in this case, - since only one service task can bind a given port on a given node. - -3. Run `docker service ls` to monitor the progress of service bring-up, which - may take a few seconds. - -4. Inspect the `nginx-net` network on `manager`, `worker-1`, and `worker-2`. - Remember that you did not need to create it manually on `worker-1` and - `worker-2` because Docker created it for you. The output will be long, but - notice the `Containers` and `Peers` sections. 
`Containers` lists all - service tasks (or standalone containers) connected to the overlay network - from that host. - -5. From `manager`, inspect the service using `docker service inspect my-nginx` - and notice the information about the ports and endpoints used by the - service. - -6. Create a new network `nginx-net-2`, then update the service to use this - network instead of `nginx-net`: - - ```console - $ docker network create -d overlay nginx-net-2 - ``` - - ```console - $ docker service update \ - --network-add nginx-net-2 \ - --network-rm nginx-net \ - my-nginx - ``` - -7. Run `docker service ls` to verify that the service has been updated and all - tasks have been redeployed. Run `docker network inspect nginx-net` to verify - that no containers are connected to it. Run the same command for - `nginx-net-2` and notice that all the service task containers are connected - to it. - - > [!NOTE] - > - > Even though overlay networks are automatically created on swarm - > worker nodes as needed, they are not automatically removed. - -8. Clean up the service and the networks. From `manager`, run the following - commands. The manager will direct the workers to remove the networks - automatically. - - ```console - $ docker service rm my-nginx - $ docker network rm nginx-net nginx-net-2 - ``` - -## Use a user-defined overlay network - -### Prerequisites - -This tutorial assumes the swarm is already set up and you are on a manager. - -### Walkthrough - -1. Create the user-defined overlay network. - - ```console - $ docker network create -d overlay my-overlay - ``` - -2. Start a service using the overlay network and publishing port 80 to port - 8080 on the Docker host. - - ```console - $ docker service create \ - --name my-nginx \ - --network my-overlay \ - --replicas 1 \ - --publish published=8080,target=80 \ - nginx:latest - ``` - -3. Run `docker network inspect my-overlay` and verify that the `my-nginx` - service task is connected to it, by looking at the `Containers` section. - -4. Remove the service and the network. - - ```console - $ docker service rm my-nginx - - $ docker network rm my-overlay - ``` - -## Use an overlay network for standalone containers - -This example demonstrates DNS container discovery -- specifically, how to -communicate between standalone containers on different Docker daemons using an -overlay network. Steps are: - -- On `host1`, initialize the node as a swarm (manager). -- On `host2`, join the node to the swarm (worker). -- On `host1`, create an attachable overlay network (`test-net`). -- On `host1`, run an interactive [alpine](https://hub.docker.com/_/alpine/) container (`alpine1`) on `test-net`. -- On `host2`, run an interactive, and detached, [alpine](https://hub.docker.com/_/alpine/) container (`alpine2`) on `test-net`. -- On `host1`, from within a session of `alpine1`, ping `alpine2`. - -### Prerequisites - -For this test, you need two different Docker hosts that can communicate with -each other. Each host must have the following ports open between the two Docker -hosts: - -- TCP port 2377 -- TCP and UDP port 7946 -- UDP port 4789 - -One easy way to set this up is to have two VMs (either local or on a cloud -provider like AWS), each with Docker installed and running. If you're using AWS -or a similar cloud computing platform, the easiest configuration is to use a -security group that opens all incoming ports between the two hosts and the SSH -port from your client's IP address. - -This example refers to the two nodes in our swarm as `host1` and `host2`. 
This -example also uses Linux hosts, but the same commands work on Windows. - -### Walk-through - -1. Set up the swarm. - - a. On `host1`, initialize a swarm (and if prompted, use `--advertise-addr` - to specify the IP address for the interface that communicates with other - hosts in the swarm, for instance, the private IP address on AWS): - - - ```console - $ docker swarm init - Swarm initialized: current node (vz1mm9am11qcmo979tlrlox42) is now a manager. - - To add a worker to this swarm, run the following command: - - docker swarm join --token SWMTKN-1-5g90q48weqrtqryq4kj6ow0e8xm9wmv9o6vgqc5j320ymybd5c-8ex8j0bc40s6hgvy5ui5gl4gy 172.31.47.252:2377 - - To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. - ``` - - b. On `host2`, join the swarm as instructed above: - - ```console - $ docker swarm join --token <your_token> <your_ip_address>:2377 - This node joined a swarm as a worker. - ``` - - If the node fails to join the swarm, the `docker swarm join` command times - out. To resolve, run `docker swarm leave --force` on `host2`, verify your - network and firewall settings, and try again. - -2. On `host1`, create an attachable overlay network called `test-net`: - - ```console - $ docker network create --driver=overlay --attachable test-net - uqsof8phj3ak0rq9k86zta6ht - ``` - - > Notice the returned **NETWORK ID** -- you will see it again when you connect to it from `host2`. - -3. On `host1`, start an interactive (`-it`) container (`alpine1`) that connects to `test-net`: - - ```console - $ docker run -it --name alpine1 --network test-net alpine - / # - ``` - -4. On `host2`, list the available networks -- notice that `test-net` does not yet exist: - - ```console - $ docker network ls - NETWORK ID NAME DRIVER SCOPE - ec299350b504 bridge bridge local - 66e77d0d0e9a docker_gwbridge bridge local - 9f6ae26ccb82 host host local - omvdxqrda80z ingress overlay swarm - b65c952a4b2b none null local - ``` - -5. On `host2`, start a detached (`-d`) and interactive (`-it`) container (`alpine2`) that connects to `test-net`: - - ```console - $ docker run -dit --name alpine2 --network test-net alpine - fb635f5ece59563e7b8b99556f816d24e6949a5f6a5b1fbd92ca244db17a4342 - ``` - - > [!NOTE] - > - > Automatic DNS container discovery only works with unique container names. - -6. On `host2`, verify that `test-net` was created (and has the same NETWORK ID as `test-net` on `host1`): - - ```console - $ docker network ls - NETWORK ID NAME DRIVER SCOPE - ... - uqsof8phj3ak test-net overlay swarm - ``` - -7. On `host1`, ping `alpine2` within the interactive terminal of `alpine1`: - - ```console - / # ping -c 2 alpine2 - PING alpine2 (10.0.0.5): 56 data bytes - 64 bytes from 10.0.0.5: seq=0 ttl=64 time=0.600 ms - 64 bytes from 10.0.0.5: seq=1 ttl=64 time=0.555 ms - - --- alpine2 ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 0.555/0.577/0.600 ms - ``` - - The two containers communicate over the overlay network connecting the two - hosts. If you run another alpine container on `host2` that is _not detached_, - you can ping `alpine1` from `host2` (and here we add the - [remove option](/reference/cli/docker/container/run/#rm) for automatic container cleanup): - - ```sh - $ docker run -it --rm --name alpine3 --network test-net alpine - / # ping -c 2 alpine1 - / # exit - ``` - -8. On `host1`, close the `alpine1` session (which also stops the container): - - ```console - / # exit - ``` - -9. 
Clean up your containers and networks: - - You must stop and remove the containers on each host independently because - Docker daemons operate independently and these are standalone containers. - You only have to remove the network on `host1` because when you stop - `alpine2` on `host2`, `test-net` disappears. - - a. On `host2`, stop `alpine2`, check that `test-net` was removed, then remove `alpine2`: - - ```console - $ docker container stop alpine2 - $ docker network ls - $ docker container rm alpine2 - ``` - - a. On `host1`, remove `alpine1` and `test-net`: - - ```console - $ docker container rm alpine1 - $ docker network rm test-net - ``` - -## Other networking tutorials - -- [Host networking tutorial](/manuals/engine/network/tutorials/host.md) -- [Standalone networking tutorial](/manuals/engine/network/tutorials/standalone.md) -- [Macvlan networking tutorial](/manuals/engine/network/tutorials/macvlan.md) diff --git a/content/manuals/engine/network/tutorials/standalone.md b/content/manuals/engine/network/tutorials/standalone.md deleted file mode 100644 index 3e4c4a09d2a..00000000000 --- a/content/manuals/engine/network/tutorials/standalone.md +++ /dev/null @@ -1,622 +0,0 @@ ---- -title: Networking with standalone containers -description: Tutorials for networking with standalone containers -keywords: networking, bridge, routing, ports, overlay -aliases: - - /network/network-tutorial-standalone/ ---- - -This series of tutorials deals with networking for standalone Docker containers. -For networking with swarm services, see -[Networking with swarm services](/manuals/engine/network/tutorials/overlay.md). If you need to -learn more about Docker networking in general, see the [overview](/manuals/engine/network/_index.md). - -This topic includes two different tutorials. You can run each of them on -Linux, Windows, or a Mac, but for the last one, you need a second Docker -host running elsewhere. - -- [Use the default bridge network](#use-the-default-bridge-network) demonstrates - how to use the default `bridge` network that Docker sets up for you - automatically. This network is not the best choice for production systems. - -- [Use user-defined bridge networks](#use-user-defined-bridge-networks) shows - how to create and use your own custom bridge networks, to connect containers - running on the same Docker host. This is recommended for standalone containers - running in production. - -Although [overlay networks](/manuals/engine/network/drivers/overlay.md) are generally used for swarm services, -you can also use an overlay network for standalone containers. That's covered as -part of the [tutorial on using overlay networks](/manuals/engine/network/tutorials/overlay.md#use-an-overlay-network-for-standalone-containers). - -## Use the default bridge network - -In this example, you start two different `alpine` containers on the same Docker -host and do some tests to understand how they communicate with each other. You -need to have Docker installed and running. - -1. Open a terminal window. List current networks before you do anything else. - Here's what you should see if you've never added a network or initialized a - swarm on this Docker daemon. You may see different networks, but you should - at least see these (the network IDs will be different): - - ```console - $ docker network ls - - NETWORK ID NAME DRIVER SCOPE - 17e324f45964 bridge bridge local - 6ed54d316334 host host local - 7092879f2cc8 none null local - ``` - - The default `bridge` network is listed, along with `host` and `none`. 
The - latter two are not fully-fledged networks, but are used to start a container - connected directly to the Docker daemon host's networking stack, or to start - a container with no network devices. This tutorial will connect two - containers to the `bridge` network. - -2. Start two `alpine` containers running `ash`, which is Alpine's default shell - rather than `bash`. The `-dit` flags mean to start the container detached - (in the background), interactive (with the ability to type into it), and - with a TTY (so you can see the input and output). Since you are starting it - detached, you won't be connected to the container right away. Instead, the - container's ID will be printed. Because you have not specified any - `--network` flags, the containers connect to the default `bridge` network. - - ```console - $ docker run -dit --name alpine1 alpine ash - - $ docker run -dit --name alpine2 alpine ash - ``` - - Check that both containers are actually started: - - ```console - $ docker container ls - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 602dbf1edc81 alpine "ash" 4 seconds ago Up 3 seconds alpine2 - da33b7aa74b0 alpine "ash" 17 seconds ago Up 16 seconds alpine1 - ``` - -3. Inspect the `bridge` network to see what containers are connected to it. - - ```console - $ docker network inspect bridge - - [ - { - "Name": "bridge", - "Id": "17e324f459648a9baaea32b248d3884da102dde19396c25b30ec800068ce6b10", - "Created": "2017-06-22T20:27:43.826654485Z", - "Scope": "local", - "Driver": "bridge", - "EnableIPv6": false, - "IPAM": { - "Driver": "default", - "Options": null, - "Config": [ - { - "Subnet": "172.17.0.0/16", - "Gateway": "172.17.0.1" - } - ] - }, - "Internal": false, - "Attachable": false, - "Containers": { - "602dbf1edc81813304b6cf0a647e65333dc6fe6ee6ed572dc0f686a3307c6a2c": { - "Name": "alpine2", - "EndpointID": "03b6aafb7ca4d7e531e292901b43719c0e34cc7eef565b38a6bf84acf50f38cd", - "MacAddress": "02:42:ac:11:00:03", - "IPv4Address": "172.17.0.3/16", - "IPv6Address": "" - }, - "da33b7aa74b0bf3bda3ebd502d404320ca112a268aafe05b4851d1e3312ed168": { - "Name": "alpine1", - "EndpointID": "46c044a645d6afc42ddd7857d19e9dcfb89ad790afb5c239a35ac0af5e8a5bc5", - "MacAddress": "02:42:ac:11:00:02", - "IPv4Address": "172.17.0.2/16", - "IPv6Address": "" - } - }, - "Options": { - "com.docker.network.bridge.default_bridge": "true", - "com.docker.network.bridge.enable_icc": "true", - "com.docker.network.bridge.enable_ip_masquerade": "true", - "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", - "com.docker.network.bridge.name": "docker0", - "com.docker.network.driver.mtu": "1500" - }, - "Labels": {} - } - ] - ``` - - Near the top, information about the `bridge` network is listed, including - the IP address of the gateway between the Docker host and the `bridge` - network (`172.17.0.1`). Under the `Containers` key, each connected container - is listed, along with information about its IP address (`172.17.0.2` for - `alpine1` and `172.17.0.3` for `alpine2`). - -4. The containers are running in the background. Use the `docker attach` - command to connect to `alpine1`. - - ```console - $ docker attach alpine1 - - / # - ``` - - The prompt changes to `#` to indicate that you are the `root` user within - the container. 
Use the `ip addr show` command to show the network interfaces - for `alpine1` as they look from within the container: - - ```console - # ip addr show - - 1: lo: mtu 65536 qdisc noqueue state UNKNOWN qlen 1 - link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 - inet 127.0.0.1/8 scope host lo - valid_lft forever preferred_lft forever - inet6 ::1/128 scope host - valid_lft forever preferred_lft forever - 27: eth0@if28: mtu 1500 qdisc noqueue state UP - link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff - inet 172.17.0.2/16 scope global eth0 - valid_lft forever preferred_lft forever - inet6 fe80::42:acff:fe11:2/64 scope link - valid_lft forever preferred_lft forever - ``` - - The first interface is the loopback device. Ignore it for now. Notice that - the second interface has the IP address `172.17.0.2`, which is the same - address shown for `alpine1` in the previous step. - -5. From within `alpine1`, make sure you can connect to the internet by - pinging `google.com`. The `-c 2` flag limits the command to two `ping` - attempts. - - ```console - # ping -c 2 google.com - - PING google.com (172.217.3.174): 56 data bytes - 64 bytes from 172.217.3.174: seq=0 ttl=41 time=9.841 ms - 64 bytes from 172.217.3.174: seq=1 ttl=41 time=9.897 ms - - --- google.com ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 9.841/9.869/9.897 ms - ``` - -6. Now try to ping the second container. First, ping it by its IP address, - `172.17.0.3`: - - ```console - # ping -c 2 172.17.0.3 - - PING 172.17.0.3 (172.17.0.3): 56 data bytes - 64 bytes from 172.17.0.3: seq=0 ttl=64 time=0.086 ms - 64 bytes from 172.17.0.3: seq=1 ttl=64 time=0.094 ms - - --- 172.17.0.3 ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 0.086/0.090/0.094 ms - ``` - - This succeeds. Next, try pinging the `alpine2` container by container - name. This will fail. - - ```console - # ping -c 2 alpine2 - - ping: bad address 'alpine2' - ``` - -7. Detach from `alpine1` without stopping it by using the detach sequence, - `CTRL` + `p` `CTRL` + `q` (hold down `CTRL` and type `p` followed by `q`). - If you wish, attach to `alpine2` and repeat steps 4, 5, and 6 there, - substituting `alpine1` for `alpine2`. - -8. Stop and remove both containers. - - ```console - $ docker container stop alpine1 alpine2 - $ docker container rm alpine1 alpine2 - ``` - -Remember, the default `bridge` network is not recommended for production. To -learn about user-defined bridge networks, continue to the -[next tutorial](#use-user-defined-bridge-networks). - -## Use user-defined bridge networks - -In this example, we again start two `alpine` containers, but attach them to a -user-defined network called `alpine-net` which we have already created. These -containers are not connected to the default `bridge` network at all. We then -start a third `alpine` container which is connected to the `bridge` network but -not connected to `alpine-net`, and a fourth `alpine` container which is -connected to both networks. - -1. Create the `alpine-net` network. You do not need the `--driver bridge` flag - since it's the default, but this example shows how to specify it. - - ```console - $ docker network create --driver bridge alpine-net - ``` - -2. 
List Docker's networks: - - ```console - $ docker network ls - - NETWORK ID NAME DRIVER SCOPE - e9261a8c9a19 alpine-net bridge local - 17e324f45964 bridge bridge local - 6ed54d316334 host host local - 7092879f2cc8 none null local - ``` - - Inspect the `alpine-net` network. This shows you its IP address and the fact - that no containers are connected to it: - - ```console - $ docker network inspect alpine-net - - [ - { - "Name": "alpine-net", - "Id": "e9261a8c9a19eabf2bf1488bf5f208b99b1608f330cff585c273d39481c9b0ec", - "Created": "2017-09-25T21:38:12.620046142Z", - "Scope": "local", - "Driver": "bridge", - "EnableIPv6": false, - "IPAM": { - "Driver": "default", - "Options": {}, - "Config": [ - { - "Subnet": "172.18.0.0/16", - "Gateway": "172.18.0.1" - } - ] - }, - "Internal": false, - "Attachable": false, - "Containers": {}, - "Options": {}, - "Labels": {} - } - ] - ``` - - Notice that this network's gateway is `172.18.0.1`, as opposed to the - default bridge network, whose gateway is `172.17.0.1`. The exact IP address - may be different on your system. - -3. Create your four containers. Notice the `--network` flags. You can only - connect to one network during the `docker run` command, so you need to use - `docker network connect` afterward to connect `alpine4` to the `bridge` - network as well. - - ```console - $ docker run -dit --name alpine1 --network alpine-net alpine ash - - $ docker run -dit --name alpine2 --network alpine-net alpine ash - - $ docker run -dit --name alpine3 alpine ash - - $ docker run -dit --name alpine4 --network alpine-net alpine ash - - $ docker network connect bridge alpine4 - ``` - - Verify that all containers are running: - - ```console - $ docker container ls - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 156849ccd902 alpine "ash" 41 seconds ago Up 41 seconds alpine4 - fa1340b8d83e alpine "ash" 51 seconds ago Up 51 seconds alpine3 - a535d969081e alpine "ash" About a minute ago Up About a minute alpine2 - 0a02c449a6e9 alpine "ash" About a minute ago Up About a minute alpine1 - ``` - -4. 
Inspect the `bridge` network and the `alpine-net` network again: - - ```console - $ docker network inspect bridge - - [ - { - "Name": "bridge", - "Id": "17e324f459648a9baaea32b248d3884da102dde19396c25b30ec800068ce6b10", - "Created": "2017-06-22T20:27:43.826654485Z", - "Scope": "local", - "Driver": "bridge", - "EnableIPv6": false, - "IPAM": { - "Driver": "default", - "Options": null, - "Config": [ - { - "Subnet": "172.17.0.0/16", - "Gateway": "172.17.0.1" - } - ] - }, - "Internal": false, - "Attachable": false, - "Containers": { - "156849ccd902b812b7d17f05d2d81532ccebe5bf788c9a79de63e12bb92fc621": { - "Name": "alpine4", - "EndpointID": "7277c5183f0da5148b33d05f329371fce7befc5282d2619cfb23690b2adf467d", - "MacAddress": "02:42:ac:11:00:03", - "IPv4Address": "172.17.0.3/16", - "IPv6Address": "" - }, - "fa1340b8d83eef5497166951184ad3691eb48678a3664608ec448a687b047c53": { - "Name": "alpine3", - "EndpointID": "5ae767367dcbebc712c02d49556285e888819d4da6b69d88cd1b0d52a83af95f", - "MacAddress": "02:42:ac:11:00:02", - "IPv4Address": "172.17.0.2/16", - "IPv6Address": "" - } - }, - "Options": { - "com.docker.network.bridge.default_bridge": "true", - "com.docker.network.bridge.enable_icc": "true", - "com.docker.network.bridge.enable_ip_masquerade": "true", - "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", - "com.docker.network.bridge.name": "docker0", - "com.docker.network.driver.mtu": "1500" - }, - "Labels": {} - } - ] - ``` - - Containers `alpine3` and `alpine4` are connected to the `bridge` network. - - ```console - $ docker network inspect alpine-net - - [ - { - "Name": "alpine-net", - "Id": "e9261a8c9a19eabf2bf1488bf5f208b99b1608f330cff585c273d39481c9b0ec", - "Created": "2017-09-25T21:38:12.620046142Z", - "Scope": "local", - "Driver": "bridge", - "EnableIPv6": false, - "IPAM": { - "Driver": "default", - "Options": {}, - "Config": [ - { - "Subnet": "172.18.0.0/16", - "Gateway": "172.18.0.1" - } - ] - }, - "Internal": false, - "Attachable": false, - "Containers": { - "0a02c449a6e9a15113c51ab2681d72749548fb9f78fae4493e3b2e4e74199c4a": { - "Name": "alpine1", - "EndpointID": "c83621678eff9628f4e2d52baf82c49f974c36c05cba152db4c131e8e7a64673", - "MacAddress": "02:42:ac:12:00:02", - "IPv4Address": "172.18.0.2/16", - "IPv6Address": "" - }, - "156849ccd902b812b7d17f05d2d81532ccebe5bf788c9a79de63e12bb92fc621": { - "Name": "alpine4", - "EndpointID": "058bc6a5e9272b532ef9a6ea6d7f3db4c37527ae2625d1cd1421580fd0731954", - "MacAddress": "02:42:ac:12:00:04", - "IPv4Address": "172.18.0.4/16", - "IPv6Address": "" - }, - "a535d969081e003a149be8917631215616d9401edcb4d35d53f00e75ea1db653": { - "Name": "alpine2", - "EndpointID": "198f3141ccf2e7dba67bce358d7b71a07c5488e3867d8b7ad55a4c695ebb8740", - "MacAddress": "02:42:ac:12:00:03", - "IPv4Address": "172.18.0.3/16", - "IPv6Address": "" - } - }, - "Options": {}, - "Labels": {} - } - ] - ``` - - Containers `alpine1`, `alpine2`, and `alpine4` are connected to the - `alpine-net` network. - -5. On user-defined networks like `alpine-net`, containers can not only - communicate by IP address, but can also resolve a container name to an IP - address. This capability is called automatic service discovery. Let's - connect to `alpine1` and test this out. `alpine1` should be able to resolve - `alpine2` and `alpine4` (and `alpine1`, itself) to IP addresses. 
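   Before attaching, you can also check name resolution directly against the embedded DNS server with a one-off `docker exec`. This is a sketch only, assuming the containers from step 3 are still running; `nslookup` here is the BusyBox applet bundled in the `alpine` image:

   ```console
   $ docker exec alpine1 nslookup alpine2
   $ docker exec alpine1 nslookup alpine4
   ```

   Both names should resolve to addresses on the `alpine-net` subnet.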
- - > [!NOTE] - > - > Automatic service discovery can only resolve custom container names, not default automatically generated container names. - - ```console - $ docker container attach alpine1 - - # ping -c 2 alpine2 - - PING alpine2 (172.18.0.3): 56 data bytes - 64 bytes from 172.18.0.3: seq=0 ttl=64 time=0.085 ms - 64 bytes from 172.18.0.3: seq=1 ttl=64 time=0.090 ms - - --- alpine2 ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 0.085/0.087/0.090 ms - - # ping -c 2 alpine4 - - PING alpine4 (172.18.0.4): 56 data bytes - 64 bytes from 172.18.0.4: seq=0 ttl=64 time=0.076 ms - 64 bytes from 172.18.0.4: seq=1 ttl=64 time=0.091 ms - - --- alpine4 ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 0.076/0.083/0.091 ms - - # ping -c 2 alpine1 - - PING alpine1 (172.18.0.2): 56 data bytes - 64 bytes from 172.18.0.2: seq=0 ttl=64 time=0.026 ms - 64 bytes from 172.18.0.2: seq=1 ttl=64 time=0.054 ms - - --- alpine1 ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 0.026/0.040/0.054 ms - ``` - -6. From `alpine1`, you should not be able to connect to `alpine3` at all, since - it is not on the `alpine-net` network. - - ```console - # ping -c 2 alpine3 - - ping: bad address 'alpine3' - ``` - - Not only that, but you can't connect to `alpine3` from `alpine1` by its IP - address either. Look back at the `docker network inspect` output for the - `bridge` network and find `alpine3`'s IP address: `172.17.0.2`. Try to ping - it. - - ```console - # ping -c 2 172.17.0.2 - - PING 172.17.0.2 (172.17.0.2): 56 data bytes - - --- 172.17.0.2 ping statistics --- - 2 packets transmitted, 0 packets received, 100% packet loss - ``` - - Detach from `alpine1` using the detach sequence, - `CTRL` + `p` `CTRL` + `q` (hold down `CTRL` and type `p` followed by `q`). - -7. Remember that `alpine4` is connected to both the default `bridge` network - and `alpine-net`. It should be able to reach all of the other containers. - However, you will need to address `alpine3` by its IP address. Attach to it - and run the tests. 
- - ```console - $ docker container attach alpine4 - - # ping -c 2 alpine1 - - PING alpine1 (172.18.0.2): 56 data bytes - 64 bytes from 172.18.0.2: seq=0 ttl=64 time=0.074 ms - 64 bytes from 172.18.0.2: seq=1 ttl=64 time=0.082 ms - - --- alpine1 ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 0.074/0.078/0.082 ms - - # ping -c 2 alpine2 - - PING alpine2 (172.18.0.3): 56 data bytes - 64 bytes from 172.18.0.3: seq=0 ttl=64 time=0.075 ms - 64 bytes from 172.18.0.3: seq=1 ttl=64 time=0.080 ms - - --- alpine2 ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 0.075/0.077/0.080 ms - - # ping -c 2 alpine3 - ping: bad address 'alpine3' - - # ping -c 2 172.17.0.2 - - PING 172.17.0.2 (172.17.0.2): 56 data bytes - 64 bytes from 172.17.0.2: seq=0 ttl=64 time=0.089 ms - 64 bytes from 172.17.0.2: seq=1 ttl=64 time=0.075 ms - - --- 172.17.0.2 ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 0.075/0.082/0.089 ms - - # ping -c 2 alpine4 - - PING alpine4 (172.18.0.4): 56 data bytes - 64 bytes from 172.18.0.4: seq=0 ttl=64 time=0.033 ms - 64 bytes from 172.18.0.4: seq=1 ttl=64 time=0.064 ms - - --- alpine4 ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 0.033/0.048/0.064 ms - ``` - -8. As a final test, make sure your containers can all connect to the internet - by pinging `google.com`. You are already attached to `alpine4` so start by - trying from there. Next, detach from `alpine4` and connect to `alpine3` - (which is only attached to the `bridge` network) and try again. Finally, - connect to `alpine1` (which is only connected to the `alpine-net` network) - and try again. - - ```console - # ping -c 2 google.com - - PING google.com (172.217.3.174): 56 data bytes - 64 bytes from 172.217.3.174: seq=0 ttl=41 time=9.778 ms - 64 bytes from 172.217.3.174: seq=1 ttl=41 time=9.634 ms - - --- google.com ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 9.634/9.706/9.778 ms - - CTRL+p CTRL+q - - $ docker container attach alpine3 - - # ping -c 2 google.com - - PING google.com (172.217.3.174): 56 data bytes - 64 bytes from 172.217.3.174: seq=0 ttl=41 time=9.706 ms - 64 bytes from 172.217.3.174: seq=1 ttl=41 time=9.851 ms - - --- google.com ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 9.706/9.778/9.851 ms - - CTRL+p CTRL+q - - $ docker container attach alpine1 - - # ping -c 2 google.com - - PING google.com (172.217.3.174): 56 data bytes - 64 bytes from 172.217.3.174: seq=0 ttl=41 time=9.606 ms - 64 bytes from 172.217.3.174: seq=1 ttl=41 time=9.603 ms - - --- google.com ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max = 9.603/9.604/9.606 ms - - CTRL+p CTRL+q - ``` - -9. Stop and remove all containers and the `alpine-net` network. 
- - ```console - $ docker container stop alpine1 alpine2 alpine3 alpine4 - - $ docker container rm alpine1 alpine2 alpine3 alpine4 - - $ docker network rm alpine-net - ``` - - -## Other networking tutorials - -- [Host networking tutorial](/manuals/engine/network/tutorials/host.md) -- [Overlay networking tutorial](/manuals/engine/network/tutorials/overlay.md) -- [Macvlan networking tutorial](/manuals/engine/network/tutorials/macvlan.md) diff --git a/content/manuals/engine/release-notes/17.07.md b/content/manuals/engine/release-notes/17.07.md index 156a9970d9b..bce33a98a7e 100644 --- a/content/manuals/engine/release-notes/17.07.md +++ b/content/manuals/engine/release-notes/17.07.md @@ -61,7 +61,7 @@ toc_max: 2 ### Swarm mode -* Initial support for plugable secret backends [moby/moby#34157](https://github.com/moby/moby/pull/34157) [moby/moby#34123](https://github.com/moby/moby/pull/34123) +* Initial support for pluggable secret backends [moby/moby#34157](https://github.com/moby/moby/pull/34157) [moby/moby#34123](https://github.com/moby/moby/pull/34123) * Sort swarm stacks and nodes using natural sorting [docker/cli#315](https://github.com/docker/cli/pull/315) * Make engine support cluster config event [moby/moby#34032](https://github.com/moby/moby/pull/34032) * Only pass a join address when in the process of joining a cluster [moby/moby#33361](https://github.com/moby/moby/pull/33361) diff --git a/content/manuals/engine/release-notes/23.0.md b/content/manuals/engine/release-notes/23.0.md index c8536229d67..ae0b40abefb 100644 --- a/content/manuals/engine/release-notes/23.0.md +++ b/content/manuals/engine/release-notes/23.0.md @@ -29,7 +29,7 @@ This page describes the latest changes, additions, known issues, and fixes for D For more information about: - Deprecated and removed features, see [Deprecated Engine Features](../deprecated.md). -- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history.md). +- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history/). Starting with the 23.0.0 release, Docker Engine moves away from using CalVer versioning, and starts using the [SemVer versioning format](https://semver.org/). diff --git a/content/manuals/engine/release-notes/24.0.md b/content/manuals/engine/release-notes/24.0.md index afd7c4ba4cf..16e20d36308 100644 --- a/content/manuals/engine/release-notes/24.0.md +++ b/content/manuals/engine/release-notes/24.0.md @@ -13,7 +13,7 @@ This page describes the latest changes, additions, known issues, and fixes for D For more information about: - Deprecated and removed features, see [Deprecated Engine Features](../deprecated.md). -- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history.md). +- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history/). ## 24.0.9 diff --git a/content/manuals/engine/release-notes/25.0.md b/content/manuals/engine/release-notes/25.0.md index d000b7ab9c7..0d1f9282392 100644 --- a/content/manuals/engine/release-notes/25.0.md +++ b/content/manuals/engine/release-notes/25.0.md @@ -12,7 +12,7 @@ This page describes the latest changes, additions, known issues, and fixes for D For more information about: - Deprecated and removed features, see [Deprecated Engine Features](../deprecated.md). -- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history.md). 
+- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history/). ## 25.0.5 @@ -217,9 +217,9 @@ For a full list of pull requests and changes in this release, refer to the relev `LimitNOFILE=1048576`. This change currently only affects build containers created with `docker - build` when using BuildKit with the `docker` driver. Future versions of - containerd will also use this limit, which will cause this behavior to affect - all containers, not only build containers. + build` when using BuildKit with the `docker` driver. Starting with Docker + Engine v29.0 (containerd v2.1.5), this limit applies to all containers, not + only build containers. If you're experiencing issues with the higher ulimit in systemd v240 or later, consider adding a system `drop-in` or `override` file to configure the ulimit diff --git a/content/manuals/engine/release-notes/26.0.md b/content/manuals/engine/release-notes/26.0.md index eaf1150b13a..7654b2455a0 100644 --- a/content/manuals/engine/release-notes/26.0.md +++ b/content/manuals/engine/release-notes/26.0.md @@ -12,7 +12,7 @@ This page describes the latest changes, additions, known issues, and fixes for D For more information about: - Deprecated and removed features, see [Deprecated Engine Features](../deprecated.md). -- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history.md). +- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history/). ## 26.0.2 diff --git a/content/manuals/engine/release-notes/26.1.md b/content/manuals/engine/release-notes/26.1.md index 8762b1e07aa..aad9d59cc50 100644 --- a/content/manuals/engine/release-notes/26.1.md +++ b/content/manuals/engine/release-notes/26.1.md @@ -12,7 +12,7 @@ This page describes the latest changes, additions, known issues, and fixes for D For more information about: - Deprecated and removed features, see [Deprecated Engine Features](../deprecated.md). -- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history.md). +- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history/). ## 26.1.4 diff --git a/content/manuals/engine/release-notes/27.md b/content/manuals/engine/release-notes/27.md index a90a7b73dc7..70c4bd0168a 100644 --- a/content/manuals/engine/release-notes/27.md +++ b/content/manuals/engine/release-notes/27.md @@ -9,7 +9,10 @@ tags: - Release notes aliases: - /engine/release-notes/27.1/ -- /engine/release-notes/27.0/ +- /engine/release-notes/27.2/ +- /engine/release-notes/27.3/ +- /engine/release-notes/27.4/ +- /engine/release-notes/27.5/ --- This page describes the latest changes, additions, known issues, and fixes for Docker Engine version 27. @@ -17,7 +20,7 @@ This page describes the latest changes, additions, known issues, and fixes for D For more information about: - Deprecated and removed features, see [Deprecated Engine Features](../deprecated.md). -- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history.md). +- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history/). 
## 27.5 diff --git a/content/manuals/engine/release-notes/28.md b/content/manuals/engine/release-notes/28.md index 015be60b89b..a3134166bcf 100644 --- a/content/manuals/engine/release-notes/28.md +++ b/content/manuals/engine/release-notes/28.md @@ -8,12 +8,12 @@ toc_max: 2 tags: - Release notes aliases: -- /engine/release-notes/ -- /engine/release-notes/latest/ -- /release-notes/docker-ce/ -- /release-notes/docker-engine/ - /engine/release-notes/28.0/ - /engine/release-notes/28.1/ +- /engine/release-notes/28.2/ +- /engine/release-notes/28.3/ +- /engine/release-notes/28.4/ +- /engine/release-notes/28.5/ --- This page describes the latest changes, additions, known issues, and fixes for Docker Engine version 28. @@ -21,7 +21,465 @@ This page describes the latest changes, additions, known issues, and fixes for D For more information about: - Deprecated and removed features, see [Deprecated Engine Features](../deprecated.md). -- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history.md). +- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history/). + +## 28.5.2 + +{{< release-date date="2025-11-05" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.5.2 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.5.2) +- [moby/moby, 28.5.2 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.5.2) + +> [!CAUTION] +> This release contains fixes for three high-severity security vulnerabilities in runc: +> - [CVE-2025-31133](https://github.com/opencontainers/runc/security/advisories/GHSA-9493-h29p-rfm2) +> - [CVE-2025-52565](https://github.com/opencontainers/runc/security/advisories/GHSA-qw9x-cqr3-wc7r) +> - [CVE-2025-52881](https://github.com/opencontainers/runc/security/advisories/GHSA-cgrx-mc8f-2prm) +> +> All three vulnerabilities ultimately allow (through different methods) for full container breakouts by bypassing runc's restrictions for writing to arbitrary `/proc` files. + +### Bug fixes and enhancements + +- dockerd-rootless.sh: if slirp4netns is not installed, try using pasta (passt). [moby/moby#51162](https://github.com/moby/moby/pull/51162) + +### Packaging updates + +- Update BuildKit to [v0.25.2](https://github.com/moby/buildkit/releases/tag/v0.25.2). [moby/moby#51398](https://github.com/moby/moby/pull/51398) +- Update Go runtime to [1.24.9](https://go.dev/doc/devel/release#go1.24.9). [moby/moby#51387](https://github.com/moby/moby/pull/51387), [docker/cli#6613](https://github.com/docker/cli/pull/6613) +- Update runc to [v1.3.3](https://github.com/opencontainers/runc/releases/tag/v1.3.3). [moby/moby#51394](https://github.com/moby/moby/pull/51394) + +### Deprecations + +- Go-SDK: cli/command/image/build: deprecate `DefaultDockerfileName`, `DetectArchiveReader`, `WriteTempDockerfile`, `ResolveAndValidateContextPath`. These utilities were only used internally and will be removed in the next release. [docker/cli#6610](https://github.com/docker/cli/pull/6610) +- Go-SDK: cli/command/image/build: deprecate IsArchive utility. [docker/cli#6560](https://github.com/docker/cli/pull/6560) +- Go-SDK: opts: deprecate `ValidateMACAddress`. [docker/cli#6560](https://github.com/docker/cli/pull/6560) +- Go-SDK: opts: deprecate ListOpts.Delete(). 
[docker/cli#6560](https://github.com/docker/cli/pull/6560) + +## 28.5.1 + +{{< release-date date="2025-10-08" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.5.1 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.5.1) +- [moby/moby, 28.5.1 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.5.1) + +### Bug fixes and enhancements + +- Update BuildKit to v0.25.1. [moby/moby#51137](https://github.com/moby/moby/pull/51137) +- Update Go runtime to [1.24.8](https://go.dev/doc/devel/release#go1.24.8). [moby/moby#51133](https://github.com/moby/moby/pull/51133), [docker/cli#6541](https://github.com/docker/cli/pull/6541) + +### Deprecations + +- api/types/image: InspectResponse: deprecate `Parent` and `DockerVersion` fields. [moby/moby#51105](https://github.com/moby/moby/pull/51105) +- api/types/plugin: deprecate `Config.DockerVersion` field. [moby/moby#51110](https://github.com/moby/moby/pull/51110) + +## 28.5.0 + +{{< release-date date="2025-10-02" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.5.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.5.0) +- [moby/moby, 28.5.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.5.0) + +> [!WARNING] +> **Raspberry Pi OS 32-bit (armhf) Deprecation** +> +> Docker Engine v28 will be the last major version to support Raspberry Pi OS 32-bit (armhf). +> Starting with Docker Engine v29, new major versions will **no longer provide packages** for Raspberry Pi OS 32-bit (armhf). +> +> #### Migration options +> - **64-bit ARM:** Install the Debian `arm64` packages (fully supported). +> - **32-bit ARM (v7):** Install the Debian `armhf` packages (targets ARMv7 CPUs). +> +> **Note:** Older devices based on the ARMv6 architecture are no longer supported by official packages, including: +> - Raspberry Pi 1 (Model A/B/A+/B+) +> - Raspberry Pi Zero and Zero W + +### Bug fixes and enhancements + +- Don't print warnings in `docker info` for broken symlinks in CLI-plugin directories. [docker/cli#6476](https://github.com/docker/cli/pull/6476) +- Fix a panic during `stats` on empty event `Actor.ID`. [docker/cli#6471](https://github.com/docker/cli/pull/6471) + +### Packaging updates + +- Remove support for legacy CBC cipher suites. [docker/cli#6474](https://github.com/docker/cli/pull/6474) +- Update Buildkit to [v0.25.0](https://github.com/moby/buildkit/releases/tag/v0.25.0). [moby/moby#51075](https://github.com/moby/moby/pull/51075) +- Update Dockerfile syntax to [v1.19.0](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.19.0). [moby/moby#51075](https://github.com/moby/moby/pull/51075) + +### Networking + +- Eliminated harmless warning about deletion of `endpoint_count` from the data store. [moby/moby#51064](https://github.com/moby/moby/pull/51064) +- Fix a bug causing IPAM plugins to not be loaded on Windows. [moby/moby#51035](https://github.com/moby/moby/pull/51035) + +### API + +- Deprecate support for kernel memory TCP accounting (`KernelMemoryTCP`). [moby/moby#51067](https://github.com/moby/moby/pull/51067) +- Fix `GET containers/{name}/checkpoints` returning `null` instead of empty JSON array when there are no checkpoints. [moby/moby#51052](https://github.com/moby/moby/pull/51052) + +### Go SDK + +- cli-plugins/plugin: Run: allow customizing the CLI. 
[docker/cli#6481](https://github.com/docker/cli/pull/6481) +- cli/command: add `WithUserAgent` option. [docker/cli#6477](https://github.com/docker/cli/pull/6477) + +### Deprecations + +- Go-SDK: cli/command: deprecate `DockerCli.Apply`. This method is no longer used and will be removed in the next release if there are no remaining uses. [docker/cli#6497](https://github.com/docker/cli/pull/6497) +- Go-SDK: cli/command: deprecate `DockerCli.ContentTrustEnabled`. This method is no longer used and will be removed in the next release. [docker/cli#6495](https://github.com/docker/cli/pull/6495) +- Go-SDK: cli/command: deprecate `DockerCli.DefaultVersion`. This method is no longer used and will be removed in the next release. [docker/cli#6491](https://github.com/docker/cli/pull/6491) +- Go-SDK: cli/command: deprecate `ResolveDefaultContext` utility. [docker/cli#6529](https://github.com/docker/cli/pull/6529) +- Go-SDK: cli/command: deprecate `WithContentTrustFromEnv`, `WithContentTrust` options. These options were used internally, and will be removed in the next release.. [docker/cli#6489](https://github.com/docker/cli/pull/6489) +- Go-SDK: cli/manifest/store: deprecate `IsNotFound()`. [docker/cli#6514](https://github.com/docker/cli/pull/6514) +- Go-SDK: templates: deprecate NewParse() function. [docker/cli#6469](https://github.com/docker/cli/pull/6469) + +## 28.4.0 + +{{< release-date date="2025-09-03" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.4.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.4.0) +- [moby/moby, 28.4.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.4.0) + +### New + +- Allow Docker CLI to set the `GODEBUG` environment variable when the key-value pair (`"GODEBUG":"..."`) exists inside the docker context metadata. [docker/cli#6399](https://github.com/docker/cli/pull/6399) + +### Bug fixes and enhancements + +- Add shell completion for `docker pull` and `docker image pull`. [docker/cli#6420](https://github.com/docker/cli/pull/6420) +- Fix a regression in v28.3.3 that could cause a panic on `docker push` if the client did not send an `X-Registry-Auth` header. [moby/moby#50738](https://github.com/moby/moby/pull/50738) +- Windows: Potentially fix an issue with "access denied" error when pulling images. [moby/moby#50871](https://github.com/moby/moby/pull/50871) +- containerd image store: Fix `docker history` failing with `snapshot X does not exist` when calling on a non-native image that was built locally. [moby/moby#50875](https://github.com/moby/moby/pull/50875) +- containerd image store: Fix `docker image prune` to emit correct `untag` and `delete` events and list only the deleted images root digests instead of every blob. [moby/moby#50837](https://github.com/moby/moby/pull/50837) +- Remove interactive login prompt from `docker push` and `docker pull` after a failure caused by missing authentication. [docker/cli#6256](https://github.com/docker/cli/pull/6256) + +### Packaging updates + +- Update BuildKit to [v0.24.0](https://github.com/moby/buildkit/releases/tag/v0.24.0). [moby#50888](https://github.com/moby/moby/pull/50888) +- Update Go runtime to [1.24.7](https://go.dev/doc/devel/release#go1.24.6). [moby/moby#50889](https://github.com/moby/moby/pull/50889), [docker/cli#6422](https://github.com/docker/cli/pull/6422) +- Update `runc` to [v1.3.0](https://github.com/opencontainers/runc/releases/tag/v1.3.0). 
[moby/moby#50699](https://github.com/moby/moby/pull/50699) +- Update containerd (static binaries only) to [v1.7.28](https://github.com/containerd/containerd/releases/tag/v1.7.28). [moby/moby#50700](https://github.com/moby/moby/pull/50700) + +### Networking + +- Fix an issue that could cause slow container restart on live-restore. [moby/moby#50829](https://github.com/moby/moby/pull/50829) + +### API + +- Update deprecation message for `AuthConfig.Email` field. [moby/moby#50797](https://github.com/moby/moby/pull/50797) + +### Go SDK + +- Deprecate profiles package which got migrated to [github.com/moby/profiles](https://github.com/moby/profiles). [moby/moby#50513](https://github.com/moby/moby/pull/50513) + +### Deprecations + +- Deprecate special handling for quoted values for the `--tlscacert`, `--tlscert`, and `--tlskey` command-line flags. [docker/cli#6291](https://github.com/docker/cli/pull/6291) +- Mark legacy links environment variables (`DOCKER_KEEP_DEPRECATED_LEGACY_LINKS_ENV_VARS`) as deprecated in v28.4 and set for removal in v30.0. [docker/cli#6309](https://github.com/docker/cli/pull/6309) +- Go-SDK: Deprecate field `NetworkSettingsBase.Bridge`, struct `NetworkSettingsBase`, all the fields of `DefaultNetworkSettings`, and struct `DefaultNetworkSettings`. [moby/moby#50839](https://github.com/moby/moby/pull/50839) +- Go-SDK: api/types: `build.CacheDiskUsage`, `container.DiskUsage`, `images.DiskUsage` and `volumes.DiskUsage` are now deprecated and will be removed in the next major release. [moby/moby#50768](https://github.com/moby/moby/pull/50768) +- Go-SDK: cli-plugins/manager: deprecate `ReexecEnvvar`. [docker/cli#6411](https://github.com/docker/cli/pull/6411) +- Go-SDK: cli-plugins/manager: deprecate annotation aliases (`CommandAnnotationPlugin`, `CommandAnnotationPluginVendor`, `CommandAnnotationPluginVersion`, `CommandAnnotationPluginInvalid`, `CommandAnnotationPluginCommandPath`) in favor of their equivalent in `cli-plugins/manager/metadata`. [docker/cli#6298](https://github.com/docker/cli/pull/6298) +- Go-SDK: cli-plugins/manager: deprecate metadata aliases (`NamePrefix`, `MetadataSubcommandName`, `HookSubcommandName`, `Metadata`, `ReexecEnvvar`) in favor of their equivalent in `cli-plugins/manager/metadata`. [docker/cli#6269](https://github.com/docker/cli/pull/6269) +- Go-SDK: cli-plugins/manager: remove `Candidate` interface, which was only for internal use. [docker/cli#6269](https://github.com/docker/cli/pull/6269) +- Go-SDK: cli-plugins/manager: remove `NewPluginError` function, which was only for internal use. [docker/cli#6269](https://github.com/docker/cli/pull/6269) +- Go-SDK: cli-plugins/manager: remove deprecated `ResourceAttributesEnvvar` const. [docker/cli#6269](https://github.com/docker/cli/pull/6269) +- Go-SDK: cli/command/builder: deprecate `NewBuilderCommand` and `NewBakeStubCommand`. These functions will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/builder: deprecate `NewPruneCommand`. [docker/cli#6343](https://github.com/docker/cli/pull/6343) +- Go-SDK: cli/command/checkpoint: deprecate `NewCheckpointCommand`. This function will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/checkpoint: deprecate `NewFormat`, `FormatWrite`. [docker/cli#6341](https://github.com/docker/cli/pull/6341) +- Go-SDK: cli/command/completion: deprecate `NoComplete`. 
[docker/cli#6405](https://github.com/docker/cli/pull/6405) +- Go-SDK: cli/command/completion: remove deprecated `ValidArgsFn`. [docker/cli#6259](https://github.com/docker/cli/pull/6259) +- Go-SDK: cli/command/config: deprecate `NewConfigCommand`. This function will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/config: deprecate `NewFormat`, `FormatWrite`, `InspectFormatWrite`. [docker/cli#6341](https://github.com/docker/cli/pull/6341) +- Go-SDK: cli/command/config: deprecate `RunConfigCreate`, `CreateOptions`, `RunConfigInspect`, `InspectOptions`, `RunConfigList`, `ListOptions`, `RunConfigRemove`, and `RemoveOptions`. [docker/cli#6369](https://github.com/docker/cli/pull/6369) +- Go-SDK: cli/command/container: deprecate `NewBuildCommand`, `NewPullCommand`, `NewPushCommand`, `NewImagesCommand`, `NewImageCommand`, `NewHistoryCommand`, `NewImportCommand`, `NewLoadCommand`, `NewRemoveCommand`, `NewSaveCommand`, `NewTagCommand`, `NewPruneCommand`. These functions will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/container: deprecate `NewDiffFormat`, `DiffFormatWrite`. These functions were only used internally and will be removed in the next release. [docker/cli#6341](https://github.com/docker/cli/pull/6341) +- Go-SDK: cli/command/container: deprecate `NewRunCommand`, `NewExecCommand`, `NewPsCommand`, `NewContainerCommand`, `NewAttachCommand`, `NewCommitCommand`, `NewCopyCommand`, `NewCreateCommand`, `NewDiffCommand`, `NewExportCommand`, `NewKillCommand`, `NewLogsCommand`, `NewPauseCommand`, `NewPortCommand`, `NewRenameCommand`, `NewRestartCommand`, `NewRmCommand`, `NewStartCommand`, `NewStatsCommand`, `NewStopCommand`, `NewTopCommand`, `NewUnpauseCommand`, `NewUpdateCommand`, `NewWaitCommand`, `NewPruneCommand`. These functions will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/context: deprecate `NewContextCommand`. This function will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/context: deprecate `RunCreate` and `CreateOptions`. [docker/cli#6403](https://github.com/docker/cli/pull/6403) +- Go-SDK: cli/command/context: deprecate `RunExport` and `ExportOptions`. [docker/cli#6403](https://github.com/docker/cli/pull/6403) +- Go-SDK: cli/command/context: deprecate `RunImport`. [docker/cli#6403](https://github.com/docker/cli/pull/6403) +- Go-SDK: cli/command/context: deprecate `RunRemove` and `RemoveOptions`. [docker/cli#6403](https://github.com/docker/cli/pull/6403) +- Go-SDK: cli/command/context: deprecate `RunUpdate` and `UpdateOptions`. [docker/cli#6403](https://github.com/docker/cli/pull/6403) +- Go-SDK: cli/command/context: deprecate `RunUse`. [docker/cli#6403](https://github.com/docker/cli/pull/6403) +- Go-SDK: cli/command/image: deprecate `AuthResolver` utility. [docker/cli#6357](https://github.com/docker/cli/pull/6357) +- Go-SDK: cli/command/image: deprecate `NewHistoryFormat`, `HistoryWrite`. [docker/cli#6341](https://github.com/docker/cli/pull/6341), [docker/cli#6341](https://github.com/docker/cli/pull/6341) +- Go-SDK: cli/command/manifest: deprecate `NewManifestCommand`. This functions will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/network: deprecate `NewFormat`, `FormatWrite`. 
[docker/cli#6341](https://github.com/docker/cli/pull/6341) +- Go-SDK: cli/command/network: deprecate `NewNetworkCommand`. These functions will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/node: deprecate `NewFormat`, `FormatWrite`, `InspectFormatWrite`. [docker/cli#6341](https://github.com/docker/cli/pull/6341) +- Go-SDK: cli/command/node: deprecate `NewNodeCommand`. This functions will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/plugin: deprecate `NewFormat`, `FormatWrite`. [docker/cli#6341](https://github.com/docker/cli/pull/6341) +- Go-SDK: cli/command/plugin: deprecate `NewPluginCommand`. This function will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/registry: deprecate `NewLoginCommand`, `NewLogoutCommand`, `NewSearchCommand`. These functions will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/registry: deprecate `NewSearchFormat`, `SearchWrite`. [docker/cli#6341](https://github.com/docker/cli/pull/6341) +- Go-SDK: cli/command/registry: deprecate `OauthLoginEscapeHatchEnvVar` const. [docker/cli#6413](https://github.com/docker/cli/pull/6413) +- Go-SDK: cli/command/secret: deprecate `NewFormat`, `FormatWrite`, `InspectFormatWrite`. [docker/cli#6341](https://github.com/docker/cli/pull/6341) +- Go-SDK: cli/command/secret: deprecate `NewSecretCommand`. This functions will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/service: deprecate `NewFormat`, `InspectFormatWrite`. [docker/cli#6341](https://github.com/docker/cli/pull/6341) +- Go-SDK: cli/command/service: deprecate `NewServiceCommand`. This function will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/stack: deprecate `NewStackCommand`. This function will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/stack: deprecate `RunList`, `RunServices`. [docker/cli#6391](https://github.com/docker/cli/pull/6391) +- Go-SDK: cli/command/swarm: deprecate `NewSwarmCommand`. This function will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/system: deprecate `NewVersionCommand`, `NewInfoCommand`, `NewSystemCommand`, `NewEventsCommand`, `NewInspectCommand`. These functions will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/task: deprecate `NewTaskFormat`, `FormatWrite`. [docker/cli#6341](https://github.com/docker/cli/pull/6341) +- Go-SDK: cli/command/trust: deprecate `NewTrustCommand`. This function will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command/trust: deprecate `SignedTagInfo`, `SignerInfo`, `NewTrustTagFormat`, `NewSignerInfoFormat`, `TagWrite`, `SignerInfoWrite`. [docker/cli#6341](https://github.com/docker/cli/pull/6341) +- Go-SDK: cli/command/volume: deprecate `NewVolumeCommand`, `NewPruneCommand`. These functions will be removed in the next release. [docker/cli#6312](https://github.com/docker/cli/pull/6312) +- Go-SDK: cli/command: remove `AddTrustSigningFlags`, `AddTrustVerificationFlags`, and `AddPlatformFlag` utilities, which were only used internally. 
[docker/cli#6311](https://github.com/docker/cli/pull/6311) +- Go-SDK: cli/command: remove deprecated `ConfigureAuth` utility. [docker/cli#6257](https://github.com/docker/cli/pull/6257) +- Go-SDK: cli/command: remove deprecated `CopyToFile` utility. [docker/cli#6257](https://github.com/docker/cli/pull/6257) +- Go-SDK: cli/config/types: update deprecation message for `AuthConfig.Email` field. [docker/cli#6392](https://github.com/docker/cli/pull/6392) +- Go-SDK: cli: deprecate `VisitAll`, `DisableFlagsInUseLine` utilities. These utilities were only used internally and will be removed in the next release. [docker/cli#6276](https://github.com/docker/cli/pull/6276) +- Go-SDK: cli: remove `HasCompletionArg` utility. This utility was only used internally. [docker/cli#6276](https://github.com/docker/cli/pull/6276) +- Go-SDK: deprecate `cli/command.RegistryAuthenticationPrivilegedFunc`. [docker/cli#6256](https://github.com/docker/cli/pull/6256) +- Go-SDK: deprecate cli/command/stack/formatter. [docker/cli#6391](https://github.com/docker/cli/pull/6391) +- Go-SDK: deprecate cli/command/stack/loader. [docker/cli#6391](https://github.com/docker/cli/pull/6391) +- Go-SDK: deprecate cli/command/stack/options. [docker/cli#6391](https://github.com/docker/cli/pull/6391) +- Go-SDK: deprecate cli/command/stack/swarm. [docker/cli#6391](https://github.com/docker/cli/pull/6391) +- Go-SDK: opts: deprecate `NewNamedListOptsRef`, `NewNamedMapOpts`, `NamedListOpts`, `NamedMapOpts`, and `NamedOption`. These types and functions are no longer used and will be removed in the next release. [docker/cli#6292](https://github.com/docker/cli/pull/6292) +- Go-SDK: opts: deprecate `ParseEnvFile` in favor of `kvfile.Parse`. [docker/cli#6381](https://github.com/docker/cli/pull/6381) +- Go-SDK: opts: deprecate `QuotedString`. This utility is no longer used, and will be removed in the next release. [docker/cli#6275](https://github.com/docker/cli/pull/6275) +- Go-SDK: opts: deprecate `ValidateHost` utility. This function is no longer used, and will be removed in the next release. [docker/cli#6280](https://github.com/docker/cli/pull/6280) +- Go-SDK: pkg/jsonmessage: deprecate the `JSONMessage.From`, `JSONMessage.Time`, and `JSONMessage.TimeNano` fields, as they are no longer returned by the API for progress messages. Use the `events.Message` type instead to unmarshal the `/events` response. [moby/moby#50762](https://github.com/moby/moby/pull/50762) +- Go-SDK: the cli/registry/client package is deprecated and will be removed in the next release. [docker/cli#6313](https://github.com/docker/cli/pull/6313) + +## 28.3.3 + +{{< release-date date="2025-07-29" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.3.3 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.3.3) +- [moby/moby, 28.3.3 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.3.3) + +### Security + +This release fixes an issue where, after a firewalld reload, published container ports could be accessed directly from the local network, even when they were intended to be accessible only via a loopback address. [CVE-2025-54388](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2025-54388) / [GHSA-x4rx-4gw3-53p4](https://github.com/moby/moby/security/advisories/GHSA-x4rx-4gw3-53p4) / [moby/moby#50506](https://github.com/moby/moby/pull/50506). + +### Packaging updates + +- Update Buildx to [v0.26.1](https://github.com/docker/buildx/releases/tag/v0.26.1). 
[docker/docker-ce-packaging#1230](https://github.com/docker/docker-ce-packaging/pull/1230) +- Update Compose to [v2.39.1](https://github.com/docker/compose/releases/tag/v2.39.1). [docker/docker-ce-packaging#1234](https://github.com/docker/docker-ce-packaging/pull/1234) +- Update Docker Model CLI plugin to [v0.1.36](https://github.com/docker/model-cli/releases/tag/v0.1.36). [docker/docker-ce-packaging#1233](https://github.com/docker/docker-ce-packaging/pull/1233) + +## 28.3.2 + +{{< release-date date="2025-07-09" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.3.2 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.3.2) +- [moby/moby, 28.3.2 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.3.2) + +### Bug fixes and enhancements + +- Fix `--use-api-socket` not working correctly when targeting a remote daemon. [docker/cli#6157](https://github.com/docker/cli/pull/6157) +- Fix stray "otel error" logs being printed if debug logging is enabled. [docker/cli#6160](https://github.com/docker/cli/pull/6160) +- Quote SSH arguments when connecting to a remote daemon over an SSH connection to avoid unexpected expansion. [docker/cli#6147](https://github.com/docker/cli/pull/6147) +- Warn when `DOCKER_AUTH_CONFIG` is set during `docker login` and `docker logout`. [docker/cli#6163](https://github.com/docker/cli/pull/6163) + +### Packaging updates + +- Update Compose to [v2.38.2](https://github.com/docker/compose/releases/tag/v2.38.2). [docker/docker-ce-packaging#1225](https://github.com/docker/docker-ce-packaging/pull/1225) +- Update Docker Model CLI plugin to [v0.1.33](https://github.com/docker/model-cli/releases/tag/v0.1.33). [docker/docker-ce-packaging#1227](https://github.com/docker/docker-ce-packaging/pull/1227) +- Update Go runtime to 1.24.5. [moby/moby#50354](https://github.com/moby/moby/pull/50354) + +## 28.3.1 + +{{< release-date date="2025-07-02" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.3.1 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.3.1) +- [moby/moby, 28.3.1 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.3.1) + +### Packaging updates + +- Update BuildKit to [v0.23.2](https://github.com/moby/buildkit/releases/tag/v0.23.2). [moby/moby#50309](https://github.com/moby/moby/pull/50309) +- Update Compose to [v2.38.1](https://github.com/docker/compose/releases/tag/v2.38.1). [docker/docker-ce-packaging#1221](https://github.com/docker/docker-ce-packaging/pull/1221) +- Update Model to v0.1.32 which adds the support for the new top-level `models:` key in Docker Compose. [docker/docker-ce-packaging#1222](https://github.com/docker/docker-ce-packaging/pull/1222) + +## 28.3.0 + +{{< release-date date="2025-06-24" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.3.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.3.0) +- [moby/moby, 28.3.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.3.0) + +### New + +- Add support for AMD GPUs in `docker run --gpus`. [moby/moby#49952](https://github.com/moby/moby/pull/49952) +- Use `DOCKER_AUTH_CONFIG` as a credential store. 
[docker/cli#6008](https://github.com/docker/cli/pull/6008) + +### Bug fixes and enhancements + +- Ensure that the state of the container in the daemon database (used by the [/containers/json](https://docs.docker.com/reference/api/engine/version/v1.49/#tag/Container/operation/ContainerList) API) is up to date when the container is stopped using the [/containers/{id}/stop](https://docs.docker.com/reference/api/engine/version/v1.49/#tag/Container/operation/ContainerStop) API, before the API responds. [moby/moby#50136](https://github.com/moby/moby/pull/50136) +- Fix `docker image inspect` omitting empty fields. [moby/moby#50135](https://github.com/moby/moby/pull/50135) +- Fix `docker images --tree` not marking images as in-use when the containerd image store is disabled. [docker/cli#6140](https://github.com/docker/cli/pull/6140) +- Fix `docker pull/push` hanging in non-interactive mode when authentication is required, caused by prompting for login credentials. [docker/cli#6141](https://github.com/docker/cli/pull/6141) +- Fix a potential resource leak when a node leaves a Swarm. [moby/moby#50115](https://github.com/moby/moby/pull/50115) +- Fix a regression where a login prompt on `docker pull` would show Docker Hub-specific hints when logging in on other registries. [docker/cli#6135](https://github.com/docker/cli/pull/6135) +- Fix an issue where all new tasks in the Swarm could get stuck in the PENDING state forever after scaling up a service with placement preferences. [moby/moby#50211](https://github.com/moby/moby/pull/50211) +- Remove an undocumented, hidden, top-level `docker remove` command that was accidentally introduced in Docker 23.0. [docker/cli#6144](https://github.com/docker/cli/pull/6144) +- Validate registry-mirrors configuration as part of `dockerd --validate` and improve error messages for invalid mirrors. [moby/moby#50240](https://github.com/moby/moby/pull/50240) +- `dockerd-rootless-setuptool.sh`: Fix the script silently returning with no error message when subuid/subgid system requirements are not satisfied. [moby/moby#50059](https://github.com/moby/moby/pull/50059) +- containerd image store: Fix `docker push` not creating a tag on the remote repository. [moby/moby#50199](https://github.com/moby/moby/pull/50199) +- containerd image store: Improve handling of errors returned by the token server during `docker pull/push`. [moby/moby#50176](https://github.com/moby/moby/pull/50176) + +### Packaging updates + +- Allow customizing the containerd service name for OpenRC. [moby/moby#50156](https://github.com/moby/moby/pull/50156) +- Update BuildKit to [v0.23.1](https://github.com/moby/buildkit/releases/tag/v0.23.1). [moby/moby#50243](https://github.com/moby/moby/pull/50243) +- Update Buildx to [v0.25.0](https://github.com/docker/buildx/releases/tag/v0.25.0). [docker/docker-ce-packaging#1217](https://github.com/docker/docker-ce-packaging/pull/1217) +- Update Compose to [v2.37.2](https://github.com/docker/compose/releases/tag/v2.37.2). [docker/docker-ce-packaging#1219](https://github.com/docker/docker-ce-packaging/pull/1219) +- Update Docker Model CLI plugin to [v0.1.30](https://github.com/docker/model-cli/releases/tag/v0.1.30). [docker/docker-ce-packaging#1218](https://github.com/docker/docker-ce-packaging/pull/1218) +- Update Go runtime to [1.24.4](https://go.dev/doc/devel/release#go1.24.4). 
[docker/docker-ce-packaging#1213](https://github.com/docker/docker-ce-packaging/pull/1213), [moby/moby#50153](https://github.com/moby/moby/pull/50153), [docker/cli#6124](https://github.com/docker/cli/pull/6124) + +### Networking + +- Revert Swarm related changes added in 28.2.x builds, due to a regression reported in https://github.com/moby/moby/issues/50129. [moby/moby#50169](https://github.com/moby/moby/pull/50169) + * Revert: Fix an issue where `docker network inspect --verbose` could sometimes crash the daemon (https://github.com/moby/moby/pull/49937). + * Revert: Fix an issue where the load-balancer IP address for an overlay network would not be released in certain cases if the Swarm was lacking an ingress network (https://github.com/moby/moby/pull/49948). + * Revert: Improve the reliability of NetworkDB in busy clusters and lossy networks (https://github.com/moby/moby/pull/49932). + * Revert: Improvements to the reliability and convergence speed of NetworkDB (https://github.com/moby/moby/pull/49939). +- Fix an issue that could cause container startup to fail, or lead to failed UDP port mappings, when some container ports are mapped to `0.0.0.0` and others are mapped to specific host addresses. [moby/moby#50054](https://github.com/moby/moby/pull/50054) +- The `network inspect` response for an overlay network now reports that `EnableIPv4` is true. [moby/moby#50147](https://github.com/moby/moby/pull/50147) +- Windows: Improve daemon startup time in cases where the host has networks of type `"Mirrored"`. [moby/moby#50155](https://github.com/moby/moby/pull/50155) +- Windows: Make sure `docker system prune` and `docker network prune` only remove networks created by Docker. [moby/moby#50154](https://github.com/moby/moby/pull/50154) + +### API + +- Update API version to 1.51. [moby/moby#50145](https://github.com/moby/moby/pull/50145) +- `GET /images/json` now sets the value of the `Containers` field for all images to the count of containers using the image. [moby/moby#50146](https://github.com/moby/moby/pull/50146) + +### Deprecations + +- Empty/nil image config fields in the `GET /images/{name}/json` response are now deprecated and will be removed in v29.0. [docker/cli#6129](https://github.com/docker/cli/pull/6129) +- api/types/container: deprecate `ExecOptions.Detach`. This field is not used, and will be removed in a future release. [moby/moby#50219](https://github.com/moby/moby/pull/50219) +- pkg/idtools: deprecate `IdentityMapping` and `Identity.Chown`. [moby/moby#50210](https://github.com/moby/moby/pull/50210) + +## 28.2.2 + +{{< release-date date="2025-05-30" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.2.2 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.2.2) +- [moby/moby, 28.2.2 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.2.2) + +### Bug fixes and enhancements + +- containerd image store: Fix a regression causing `docker build --push` to fail. This reverts [the fix](https://github.com/moby/moby/pull/49702) for `docker build` not persisting overridden images as dangling. [moby/moby#50105](https://github.com/moby/moby/pull/50105) + +### Networking + +- When creating the iptables `DOCKER-USER` chain, do not add an explicit `RETURN` rule, allowing users to append as well as insert their own rules. Existing rules are not removed on upgrade, but it won't be replaced after a reboot. 
[moby/moby#50098](https://github.com/moby/moby/pull/50098) + +## 28.2.1 + +{{< release-date date="2025-05-29" >}} + +### Packaging updates + +- Fix packaging regression in [v28.2.0](https://github.com/moby/moby/releases/tag/v28.2.0) which broke creating the `docker` group/user on fresh installations. [docker-ce-packaging#1209](https://github.com/docker/docker-ce-packaging/issues/1209) + +## 28.2.0 + +{{< release-date date="2025-05-28" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.2.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.2.0) +- [moby/moby, 28.2.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.2.0) + +> [!NOTE] +> RHEL packages are currently not available and will be released later. + +### New + +- Add `{{.Platform}}` as formatting option for `docker ps` to show the platform of the image the container is running. [docker/cli#6042](https://github.com/docker/cli/pull/6042) +- Add support for relative parent paths (`../`) on bind mount sources when using `docker run/create` with `-v/--volume` or `--mount type=bind` options. [docker/cli#4966](https://github.com/docker/cli/pull/4966) +- CDI is now enabled by default. [moby/moby#49963](https://github.com/moby/moby/pull/49963) +- Show discovered CDI devices in `docker info`. [docker/cli#6078](https://github.com/docker/cli/pull/6078) +- `docker image rm`: add `--platform` option to remove a variant from multi-platform images. [docker/cli#6109](https://github.com/docker/cli/pull/6109) +- containerd image store: Initial BuildKit support for building Windows container images on Windows (requires an opt-in with `DOCKER_BUILDKIT=1`). [moby/moby#49740](https://github.com/moby/moby/pull/49740) + +### Bug fixes and enhancements + +- Add a new log option for fluentd log driver (`fluentd-write-timeout`), which enables specifying write timeouts for fluentd connections. [moby/moby#49911](https://github.com/moby/moby/pull/49911) +- Add support for `DOCKER_AUTH_CONFIG` for the experimental `--use-api-socket` option. [docker/cli#6019](https://github.com/docker/cli/pull/6019) +- Fix `docker exec` waiting for 10 seconds if a non-existing user or group was specified. [moby/moby#49868](https://github.com/moby/moby/pull/49868) +- Fix `docker swarm init` ignoring `cacert` option of `--external-ca`. [docker/cli#5995](https://github.com/docker/cli/pull/5995) +- Fix an issue where the CLI would not correctly save the configuration file (`~/.docker/config.json`) if it was a relative symbolic link. [docker/cli#5282](https://github.com/docker/cli/pull/5282) +- Fix containers with `--restart always` policy using CDI devices failing to start on daemon restart. [moby/moby#49990](https://github.com/moby/moby/pull/49990) +- Fix shell-completion to only complete some flags once, even though they can be set multiple times. [docker/cli#6030](https://github.com/docker/cli/pull/6030) +- Fix the `plugin does not implement PluginAddr interface` error for Swarm CSI drivers. [moby/moby#49961](https://github.com/moby/moby/pull/49961) +- Improve `docker login` error messages for invalid options. [docker/cli#6036](https://github.com/docker/cli/pull/6036) +- Make sure the terminal state is restored if the CLI is forcefully terminated. [docker/cli#6058](https://github.com/docker/cli/pull/6058) +- Update the default seccomp profile to match the libseccomp v2.6.0. 
The new syscalls are: `listmount`, `statmount`, `lsm_get_self_attr`, `lsm_list_modules`, `lsm_set_self_attr`, `mseal`, `uretprobe`, `riscv_hwprobe`, `getxattrat`, `listxattrat`, `removexattrat`, and `setxattrat`. This prevents containers from receiving EPERM errors when using them. [moby/moby#50077](https://github.com/moby/moby/pull/50077) +- `docker inspect`: add shell completion, improve flag-description for `--type` and improve validation. [docker/cli#6052](https://github.com/docker/cli/pull/6052) +- containerd image store: Enable BuildKit garbage collector by default. [moby/moby#49899](https://github.com/moby/moby/pull/49899) +- containerd image store: Fix `docker build` not persisting overridden images as dangling. [moby/moby#49702](https://github.com/moby/moby/pull/49702) +- containerd image store: Fix `docker system df` reporting a negative reclaimable space amount. [moby/moby#49707](https://github.com/moby/moby/pull/49707) +- containerd image store: Fix duplicate `PUT` requests when pushing a multi-platform image. [moby/moby#49949](https://github.com/moby/moby/pull/49949) + +### Packaging updates + +- Drop Ubuntu 20.04 "Focal" packages as it reached end of life. [docker/docker-ce-packaging#1200](https://github.com/docker/docker-ce-packaging/pull/1200) +- Fix install location for RPM-based `docker-ce` man-pages. [docker/docker-ce-packaging#1203](https://github.com/docker/docker-ce-packaging/pull/1203) +- Update BuildKit to [v0.22.0](https://github.com/moby/buildkit/releases/tag/v0.22.0). [moby/moby#50046](https://github.com/moby/moby/pull/50046) +- Update Buildx to [v0.24.0](https://github.com/docker/buildx/releases/tag/v0.24.0). [docker/docker-ce-packaging#1205](https://github.com/docker/docker-ce-packaging/pull/1205) +- Update Compose to [v2.36.2](https://github.com/docker/compose/releases/tag/v2.36.2). [docker/docker-ce-packaging#1208](https://github.com/docker/docker-ce-packaging/pull/1208) +- Update Go runtime to [1.24.3](https://go.dev/doc/devel/release#go1.24.3). [docker/docker-ce-packaging#1192](https://github.com/docker/docker-ce-packaging/pull/1192), [docker/cli#6060](https://github.com/docker/cli/pull/6060), [moby/moby#49174](https://github.com/moby/moby/pull/49174) + +### Networking + +- Add bridge network option `"com.docker.network.bridge.trusted_host_interfaces"`, accepting a colon-separated list of interface names. These interfaces have direct access to published ports on container IP addresses. [moby/moby#49832](https://github.com/moby/moby/pull/49832) +- Add daemon option `"allow-direct-routing"` to disable filtering of packets from outside the host addressed directly to containers. [moby/moby#49832](https://github.com/moby/moby/pull/49832) +- Do not display network options `com.docker.network.enable_ipv4` or `com.docker.network.enable_ipv6` in inspect output if they have been overridden by `EnableIPv4` or `EnableIPv6` in the network create request. [moby/moby#49866](https://github.com/moby/moby/pull/49866) +- Fix an issue that could cause network deletion to fail after a daemon restart, with error "has active endpoints" listing empty endpoint names. [moby/moby#49901](https://github.com/moby/moby/pull/49901) +- Fix an issue where `docker network inspect --verbose` could sometimes crash the daemon. [moby/moby#49937](https://github.com/moby/moby/pull/49937) +- Fix an issue where the load-balancer IP address for an overlay network would not be released in certain cases if the Swarm was lacking an ingress network. 
[moby/moby#49948](https://github.com/moby/moby/pull/49948) +- Improve the reliability of NetworkDB in busy clusters and lossy networks. [moby/moby#49932](https://github.com/moby/moby/pull/49932) +- Improvements to the reliability and convergence speed of NetworkDB. [moby/moby#49939](https://github.com/moby/moby/pull/49939) + +### API + +- `DELETE /images/{name}` now supports a `platforms` query parameter. It accepts an array of JSON-encoded OCI Platform objects, allowing specific platforms to be selected for content deletion. [moby/moby#49982](https://github.com/moby/moby/pull/49982) +- `GET /info` now includes a `DiscoveredDevices` field. This is an array of `DeviceInfo` objects, each providing details about a device discovered by a device driver. [moby/moby#49980](https://github.com/moby/moby/pull/49980) + +### Go SDK + +- `api/types/container`: add `ContainerState` and constants for container state. [moby/moby#49965](https://github.com/moby/moby/pull/49965) +- `api/types/container`: change `Summary.State` to a `ContainerState`. [moby/moby#49991](https://github.com/moby/moby/pull/49991) +- `api/types/container`: define `HealthStatus` type for health-status constants. [moby/moby#49876](https://github.com/moby/moby/pull/49876) +- `api/types`: deprecate `BuildResult`, `ImageBuildOptions`, `ImageBuildOutput`, `ImageBuildResponse`, `BuilderVersion`, `BuilderV1`, and `BuilderBuildKit`, which were moved to `api/types/build`. [moby/moby#50025](https://github.com/moby/moby/pull/50025) + +### Deprecations + +- API: Deprecated: `GET /images/{name}/json` no longer returns the following fields: `Config`, `Hostname`, `Domainname`, `AttachStdin`, `AttachStdout`, `AttachStderr`, `Tty`, `OpenStdin`, `StdinOnce`, `Image`, `NetworkDisabled` (already omitted unless set), `MacAddress` (already omitted unless set), `StopTimeout` (already omitted unless set). These additional fields were included in the response due to an implementation detail, but are not part of the image's configuration; they were marked deprecated in API v1.46 and are now omitted. [moby/moby#48457](https://github.com/moby/moby/pull/48457) +- Go-SDK: Deprecate `builder/remotecontext.Rel()`. This function was needed on older versions of Go, but can now be replaced by `filepath.Rel()`. [moby/moby#49843](https://github.com/moby/moby/pull/49843) +- Go-SDK: api/types: deprecate `BuildCachePruneOptions` in favor of `api/types/builder.CachePruneOptions`. [moby/moby#50015](https://github.com/moby/moby/pull/50015) +- Go-SDK: api/types: deprecate `BuildCachePruneReport` in favor of `api/types/builder.CachePruneReport`. [moby/moby#50015](https://github.com/moby/moby/pull/50015) +- Go-SDK: api/types: deprecate `NodeListOptions`, `NodeRemoveOptions`, `ServiceCreateOptions`, `ServiceUpdateOptions`, `RegistryAuthFromSpec`, `RegistryAuthFromPreviousSpec`, `ServiceListOptions`, `ServiceInspectOptions`, and `SwarmUnlockKeyResponse`, which were moved to `api/types/swarm`. [moby/moby#50027](https://github.com/moby/moby/pull/50027) +- Go-SDK: api/types: deprecate `SecretCreateResponse`, `SecretListOptions`, `ConfigCreateResponse`, and `ConfigListOptions`, which were moved to `api/types/swarm`. [moby/moby#50024](https://github.com/moby/moby/pull/50024) +- Go-SDK: client: deprecate `IsErrNotFound`. [moby/moby#50012](https://github.com/moby/moby/pull/50012) +- Go-SDK: container: deprecate `IsValidHealthString` in favor of `api/types/container.ValidateHealthStatus`. 
[moby/moby#49893](https://github.com/moby/moby/pull/49893) +- Go-SDK: container: deprecate `StateStatus`, `WaitCondition`, and the related `WaitConditionNotRunning`, `WaitConditionNextExit`, and `WaitConditionRemoved` consts in favor of their equivalents in `api/types/container`. [moby/moby#49874](https://github.com/moby/moby/pull/49874) +- Go-SDK: opts: deprecate `ListOpts.GetAll` in favor of `ListOpts.GetSlice`. [docker/cli#6032](https://github.com/docker/cli/pull/6032) +- Remove deprecated `IsAutomated` formatting placeholder from `docker search`. [docker/cli#6091](https://github.com/docker/cli/pull/6091) +- Remove fallback for pulling images from non-OCI-compliant `docker.pkg.github.com` registry. [moby/moby#50094](https://github.com/moby/moby/pull/50094) +- Remove support for pulling legacy v2, schema 1 images and remove `DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE` environment-variable. [moby/moby#50036](https://github.com/moby/moby/pull/50036), [moby/moby#42300](https://github.com/moby/moby/pull/42300) +- The `BridgeNfIptables` and `BridgeNfIp6tables` fields in the `GET /info` response were deprecated in API v1.48, and are now omitted in API v1.50. [moby/moby#49904](https://github.com/moby/moby/pull/49904) +- errdefs: Deprecate `errdefs.FromStatusCode`. Use containerd's `errhttp.ToNative` instead. [moby/moby#50030](https://github.com/moby/moby/pull/50030) ## 28.1.1 diff --git a/content/manuals/engine/release-notes/29.md b/content/manuals/engine/release-notes/29.md new file mode 100644 index 00000000000..0d7f28ee390 --- /dev/null +++ b/content/manuals/engine/release-notes/29.md @@ -0,0 +1,874 @@ +--- +title: Docker Engine version 29 release notes +linkTitle: Engine v29 +description: Learn about the new features, bug fixes, and breaking changes for Docker Engine +keywords: docker, docker engine, ce, whats new, release notes +toc_min: 1 +toc_max: 2 +tags: + - Release notes +aliases: + - /engine/release-notes/ + - /engine/release-notes/latest/ + - /release-notes/docker-ce/ + - /release-notes/docker-engine/ + - /engine/release-notes/29.0/ +--- + +This page describes the latest changes, additions, known issues, and fixes for Docker Engine version 29. + +For more information about: + +- Deprecated and removed features, see [Deprecated Engine Features](../deprecated.md). +- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history/). + +## 29.4.3 + +{{< release-date date="2026-05-06" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.4.3 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.4.3) +- [moby/moby, 29.4.3 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.4.3) + +### Security + +- **CVE-2026-31431**: Replace the socketcall(2) seccomp deny that broke 32-bit programs with targeted AppArmor (deny network alg) and SELinux (alg_socket) rules that block AF_ALG at the LSM layer, covering both socket(2) and socketcall(2) paths without disrupting legitimate 32-bit workloads. [moby/moby#52537](https://github.com/moby/moby/pull/52537) + + On SELinux-based systems, the SELinux mitigation requires the daemon to be configured with `selinux-enabled: true` (via `daemon.json` or the `--selinux-enabled` CLI flag). This option is not enabled by default. + +- Fix the default AppArmor profile not being updated on daemon restart, requiring a system reboot to pick up profile changes from daemon upgrades. 
[moby/moby#52537](https://github.com/moby/moby/pull/52537) + +## 29.4.2 + +{{< release-date date="2026-05-01" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.4.2 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.4.2) +- [moby/moby, 29.4.2 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.4.2) + +### Security + +This release includes hardening for **CVE-2026-31431**. + +- Block `AF_ALG` sockets and the `socketcall(2)` multiplexer in the default seccomp profile to prevent in-container privilege escalation via the kernel crypto API ("Copy Fail"). [moby/moby#52501](https://github.com/moby/moby/pull/52501) + +### Known issues + +The hardening can break 32-bit programs and i386 images, including SteamCMD and some Wine-based workloads. [moby/moby#52506](https://github.com/moby/moby/issues/52506) + +#### Workaround + +> [!WARNING] +> Don't use `--security-opt seccomp=unconfined` to work around this issue. +> Don't use the `seccomp/v0.2.0` profile. + +If you need a workaround, use the `seccomp/v0.2.1` profile from `moby/profiles`. +Make sure you use a kernel that includes the fix for CVE-2026-31431. + +This profile unblocks `socketcall` while keeping `AF_ALG` blocked for `socket`. + +> [!IMPORTANT] +> Use this workaround only for containers that require it. +> Containers that use this profile can still exploit CVE-2026-31431 through the `socketcall` syscall. + +Download the `seccomp/v0.2.1` profile: + +```console +$ curl -fsSL https://raw.githubusercontent.com/moby/profiles/refs/tags/seccomp/v0.2.1/seccomp/default.json \ + -o /etc/docker/seccomp-profile-v0.2.1.json +``` + +Use one of these options. You don't need both. + +1. To use the profile for a specific container when you control the `docker run` command, use `--security-opt`: + + ```console + $ docker run --security-opt seccomp= ... + ``` + +2. To use the profile as the default for containers created by the daemon, add `seccomp-profile` to your `daemon.json`: + + ```json + { + "seccomp-profile": "/etc/docker/seccomp-profile-v0.2.1.json" + } + ``` + +## 29.4.1 + +{{< release-date date="2026-04-20" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.4.1 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.4.1) +- [moby/moby, 29.4.1 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.4.1) + +### Bug fixes and enhancements + +- containerd image store: Fix `docker image prune --filter label!=key=value` incorrectly skipping images that don't have the specified label. [moby/moby#52338](https://github.com/moby/moby/pull/52338) +- Fix `--log-opt "tag={{.ImageID}}"` not stripping the digest's algorithm. [moby/moby#52343](https://github.com/moby/moby/pull/52343) +- Fix intermittent container start failures (`EBUSY` on secrets/configs remount) on busy Swarm nodes by retrying the read-only remount. [moby/moby#52235](https://github.com/moby/moby/pull/52235) + +### Packaging updates + +- Update containerd (static binaries only) to [v2.2.3](https://github.com/containerd/containerd/releases/tag/v2.2.3). [moby/moby#52360](https://github.com/moby/moby/pull/52360) +- Update Go runtime to [1.26.2](https://go.dev/doc/devel/release#go1.26.2). 
[docker/cli#6920](https://github.com/docker/cli/pull/6920), [moby/moby#52329](https://github.com/moby/moby/pull/52329) + +### Networking + +- If a container has an IPv4-only or an IPv6-only endpoint with a higher "gateway priority" than a dual-stack endpoint, the single-stack endpoint will now be used as the default gateway for its address family. [moby/moby#52328](https://github.com/moby/moby/pull/52328) + + +## 29.4.0 + +{{< release-date date="2026-04-07" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.4.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.4.0) +- [moby/moby, 29.4.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.4.0) + +### Bug fixes and enhancements + +- `docker cp`: report both content size and transferred size. [docker/cli#6800](https://github.com/docker/cli/pull/6800) +- Fix `docker stats --all` still showing containers that were removed. [docker/cli#6863](https://github.com/docker/cli/pull/6863) +- Fix a rare bug that could cause containers to become unremovable. [moby/moby#51724](https://github.com/moby/moby/pull/51724) +- Fix privileged containers losing their explicit AppArmor profile (`--security-opt apparmor=`) after a container restart. [moby/moby#52215](https://github.com/moby/moby/pull/52215) +- Improve duplicate container-exit handling by using live containerd task state (not timestamps). [moby/moby#52156](https://github.com/moby/moby/pull/52156) +- Improve image pull and push performance by enabling HTTP keep-alive for registry connections, avoiding redundant TCP and TLS handshakes. [moby/moby#52198](https://github.com/moby/moby/pull/52198) +- shell completions: add shell completion for `docker rm --link` and exclude legacy links for container names. [docker/cli#6872](https://github.com/docker/cli/pull/6872) +- shell completions: don't provide completions that were already used. [docker/cli#6871](https://github.com/docker/cli/pull/6871) +- Update runc (in static binaries) to [v1.3.5](https://github.com/opencontainers/runc/releases/tag/v1.3.5). [moby/moby#52244](https://github.com/moby/moby/pull/52244) +- Windows: Fix `DOCKER_TMPDIR` not being respected. [moby/moby#52181](https://github.com/moby/moby/pull/52181) + +### Packaging updates + +- Update BuildKit to [v0.29.0](https://github.com/moby/buildkit/releases/tag/v0.29.0). [moby/moby#52272](https://github.com/moby/moby/pull/52272) + +### Networking + +- Prevent a daemon crash during startup after upgrading if a container config contains a malformed IP address. [moby/moby#52275](https://github.com/moby/moby/pull/52275) + +### Go SDK + +- cli/streams: Out, In: preserve original os.File when available. [docker/cli#6906](https://github.com/docker/cli/pull/6906) +- Update the minimum Go version to go1.25. [docker/cli#6897](https://github.com/docker/cli/pull/6897) + +### Deprecations + +- Go SDK: cli-plugins/hooks: deprecate `HookMessage` and rename to `cli-plugins/hooks.Response`. [docker/cli#6859](https://github.com/docker/cli/pull/6859) +- Go SDK: cli-plugins/hooks: deprecate `HookType` and rename to `cli-plugins/hooks.ResponseType`. [docker/cli#6859](https://github.com/docker/cli/pull/6859) +- Go SDK: cli-plugins/manager: deprecate `HookPluginData` and move to `cli-plugins/hooks.Request`. 
[docker/cli#6859](https://github.com/docker/cli/pull/6859) + +## 29.3.1 + +{{< release-date date="2026-03-25" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.3.1 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.3.1) +- [moby/moby, 29.3.1 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.3.1) + +### Security + +This release includes fixes for multiple security vulnerabilities affecting Docker Engine and related components. + +- **CVE-2026-34040** Fix an authorization bypass in AuthZ plugins that could allow authorization plugins to be bypassed under specific conditions. + [GHSA-x744-4wpc-v9h2](https://github.com/moby/moby/security/advisories/GHSA-x744-4wpc-v9h2) + +- **CVE-2026-33997** Fix a flaw in `docker plugin install` where privilege validation could be partially bypassed, potentially leading to unauthorized privilege escalation. + [GHSA-pxq6-2prw-chj9](https://github.com/moby/moby/security/advisories/GHSA-pxq6-2prw-chj9) + +- **CVE-2026-33748** Fix insufficient validation of Git URL `#ref:subdir` fragments in BuildKit, which could allow access to files outside the intended repository scope. + [GHSA-4vrq-3vrq-g6gg](https://github.com/moby/buildkit/security/advisories/GHSA-4vrq-3vrq-g6gg) + +- **CVE-2026-33747** Fix a vulnerability in BuildKit where an untrusted frontend could cause files to be written outside the BuildKit state directory. + [GHSA-3c29-8rgm-jvjj](https://github.com/moby/buildkit/security/advisories/GHSA-4c29-8rgm-jvjj) + +### Bug fixes and enhancements + +- Fix a daemon crash during docker build if `.dockerignore` contained an invalid pattern. [moby/moby#52214](https://github.com/moby/moby/pull/52214) +- Fix a panic when the containerd client uses a closed stream. [moby/moby#52211](https://github.com/moby/moby/pull/52211) + +### Packaging updates + +- Update containerd (static binaries) to [v2.2.2](https://github.com/containerd/containerd/releases/tag/v2.2.2). [moby/moby#52213](https://github.com/moby/moby/pull/52213) +- Update Go runtime to [1.25.8](https://go.dev/doc/devel/release#go1.25.8). [moby/moby#52210](https://github.com/moby/moby/pull/52210), [docker/cli#6883](https://github.com/docker/cli/pull/6883) + +### Go SDK + +- Add missing build-tag, which could cause `cannot range over 10 (untyped int constant)` when importing the `cli/command` package. [docker/cli#6884](https://github.com/docker/cli/pull/6884) + +## 29.3.0 + +{{< release-date date="2026-03-05" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.3.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.3.0) +- [moby/moby, 29.3.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.3.0) + +### New + +- Add `bind-create-src` option to `--mount` flag for bind mounts. [docker/cli#6792](https://github.com/docker/cli/pull/6792) +- CLI plugin hooks now fire on command failure (not just success), and plugins can use "error-hooks" to show hints only when commands fail. [docker/cli#6794](https://github.com/docker/cli/pull/6794) +- Lower minimum API version from v1.44 to v1.40 (Docker 19.03). [moby/moby#52067](https://github.com/moby/moby/pull/52067) + +### Packaging updates + +- Update BuildKit to [v0.28.0](https://github.com/moby/buildkit/releases/tag/v0.28.0). 
[moby/moby#52135](https://github.com/moby/moby/pull/52135) + +### Networking + +- Fix DNS config corruption on daemon reload. [moby/moby#52060](https://github.com/moby/moby/pull/52060) + +### API + +- `POST /networks/{id}/connect` now correctly applies the `MacAddress` field in `EndpointSettings`. This field was added in API v1.44, but was previously ignored. [moby/moby#52040](https://github.com/moby/moby/pull/52040) +- `GET /images/json` now supports an `identity` query parameter. When set, the response includes manifest summaries and may include an `Identity` field for each manifest with trusted identity and origin information. [moby/moby#52030](https://github.com/moby/moby/pull/52030) + +### Bug fixes and enhancements + +- The `--gpus` option now uses CDI-based injection for AMD GPUs. [moby/moby#52048](https://github.com/moby/moby/pull/52048) +- Add `sd_notify` ["RELOADING"](https://www.freedesktop.org/software/systemd/man/latest/sd_notify.html#RELOADING=1) notifications when signalling the daemon to reload its configuration. [moby/moby#52041](https://github.com/moby/moby/pull/52041) +- Send `sd_notify` ["READY"](https://www.freedesktop.org/software/systemd/man/latest/sd_notify.html#READY=1) and ["STOPPING"](https://www.freedesktop.org/software/systemd/man/latest/sd_notify.html#STOPPING=1) synchronously to make sure they are sent before we proceed. [moby/moby#52041](https://github.com/moby/moby/pull/52041) +- Add support for the systemd 253 `Type=notify-reload` service reload protocol. [moby/moby#52041](https://github.com/moby/moby/pull/52041) +- Don't log "failed to determine if container is already mounted" warnings for stopped containers during startup. [moby/moby#52076](https://github.com/moby/moby/pull/52076) +- Fix `docker system prune` failing with "rw layer snapshot not found" when a container is concurrently removed. [moby/moby#52090](https://github.com/moby/moby/pull/52090) +- Fix a panic when running `docker top` on a non-running Windows container. [moby/moby#52025](https://github.com/moby/moby/pull/52025) +- Fix a regression in v29.2.0 that prevented registering the dockerd service on Windows if system requirements were not yet installed. [moby/moby#52006](https://github.com/moby/moby/pull/52006) +- Fix shared mount detection for paths mounted multiple times, which caused "not a shared mount" errors when using bind propagation. [moby/moby#51787](https://github.com/moby/moby/pull/51787) +- Fix spurious "ShouldRestart failed" warning on shutdown. [moby/moby#52079](https://github.com/moby/moby/pull/52079) +- Preserve leading and trailing whitespace when storing registry passwords. [docker/cli#6784](https://github.com/docker/cli/pull/6784) +- Prevent logging "not found" warnings when calculating volume sizes. [moby/moby#52018](https://github.com/moby/moby/pull/52018) +- Update Go runtime to [1.25.7](https://go.dev/doc/devel/release#go1.25.7). [moby/moby#52003](https://github.com/moby/moby/pull/52003), [docker/cli#6780](https://github.com/docker/cli/pull/6780) + +## 29.2.1 + +{{< release-date date="2026-02-02" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.2.1 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.2.1) +- [moby/moby, 29.2.1 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.2.1) + +### Bug fixes and enhancements + +- Update BuildKit to [v0.27.1](https://github.com/moby/buildkit/releases/tag/v0.27.1). 
[moby/moby#51962](https://github.com/moby/moby/pull/51962) +- Fix `docker system df` failing when run concurrently with `docker system prune`. [moby/moby#51979](https://github.com/moby/moby/pull/51979) +- Fix daemon handling of duplicate container exit events to avoid repeated cleanup and state transitions. [moby/moby#51925](https://github.com/moby/moby/pull/51925) +- Fix panic after failed daemon initialization. [moby/moby#51943](https://github.com/moby/moby/pull/51943) +- Fix encrypted overlay networks not passing traffic to containers on v28 and older Engines. Encrypted overlay networks will no longer pass traffic to containers on v29.2.0 thru v29.0.0, v28.2.2, v25.0.14 or v25.0.13. [moby/moby#51951](https://github.com/moby/moby/pull/51951) +- Fix potential panic on `docker network prune`. [moby/moby#51966](https://github.com/moby/moby/pull/51966) + +## 29.2.0 + +{{< release-date date="2026-01-26" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.2.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.2.0) +- [moby/moby, 29.2.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.2.0) + +### New + +- `docker info` now includes `NRI` section. [docker/cli#6710](https://github.com/docker/cli/pull/6710) +- Add experimental NRI support. [moby/moby#51711](https://github.com/moby/moby/pull/51711), [moby/moby#51712](https://github.com/moby/moby/pull/51712), [moby/moby#51675](https://github.com/moby/moby/pull/51675), [moby/moby#51674](https://github.com/moby/moby/pull/51674), [moby/moby#51636](https://github.com/moby/moby/pull/51636), [moby/moby#51634](https://github.com/moby/moby/pull/51634) +- New `Identity` field has been added to the inspect endpoint to show trusted origin information about the image. This includes build ref for locally built images, remote registry repository for pulled images, and verified signature information for images that contain a valid signed provenance attestation. [moby/moby#51737](https://github.com/moby/moby/pull/51737) + +### Bug fixes and enhancements + +- Improve validation of `--detach-keys` command-line options. [docker/cli#6742](https://github.com/docker/cli/pull/6742) +- Prevent a potential panic on daemon shutdown after an incomplete initialization. [moby/moby#51797](https://github.com/moby/moby/pull/51797) +- Remove restriction on anonymous read-only volumes. [moby/moby#51682](https://github.com/moby/moby/pull/51682) +- The `--validate` flag on dockerd now also verifies system requirements, allowing for system requirements to be checked before starting the daemon. [moby/moby#51868](https://github.com/moby/moby/pull/51868) +- Handle `--gpus` requests for NVIDIA devices using CDI if possible. [moby/moby#50228](https://github.com/moby/moby/pull/50228) + +### Packaging updates + +- Update BuildKit to [v0.27.0](https://github.com/moby/buildkit/releases/tag/v0.27.0). [moby/moby#51886](https://github.com/moby/moby/pull/51886) +- Update containerd (static binaries only) to [v2.2.1](https://github.com/containerd/containerd/releases/tag/v2.2.1). [moby/moby#51765](https://github.com/moby/moby/pull/51765) + +### Rootless + +- Rootless: Consider `$XDG_CONFIG_HOME/cdi` and `$XDG_RUNTIME_DIR/cdi` when looking for CDI devices. [moby/moby#51624](https://github.com/moby/moby/pull/51624) +- Update RootlessKit to [v2.3.6](https://github.com/rootless-containers/rootlesskit/releases/tag/v2.3.6). 
[moby/moby#51757](https://github.com/moby/moby/pull/51757) + +### API + +- Natively support gRPC on the listening socket. [moby/moby#50744](https://github.com/moby/moby/pull/50744) + +### Go SDK + +- cli/command: add WithAPIClientOptions option. [docker/cli#6740](https://github.com/docker/cli/pull/6740) + +### Deprecations + +- Remove `%PROGRAMDATA%\Docker\cli-plugins` from the list of paths used for CLI plugins on Windows. This path was present for backward compatibility with old installation, but replaced by `%ProgramFiles%\Docker\cli-plugins`. [docker/cli#6713](https://github.com/docker/cli/pull/6713) + +## 29.1.5 + +{{< release-date date="2026-01-16" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.1.5 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.1.5) +- [moby/moby, 29.1.5 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.1.5) + +### Packaging updates + +- Update Go runtime to [1.25.6](https://go.dev/doc/devel/release#go1.25.6). [moby/moby#51860](https://github.com/moby/moby/pull/51860), [docker/cli#6750](https://github.com/docker/cli/pull/6750) + +### Networking + +- Fixed a regression where established network connections could be disrupted during a container's shutdown grace period. [moby/moby#51843](https://github.com/moby/moby/pull/51843) + +## 29.1.4 + +{{< release-date date="2026-01-08" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.1.4 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.1.4) +- [moby/moby, 29.1.4 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.1.4) + +### Bug fixes and enhancements + +- Fix `docker run --network none` panic on Windows. [moby/moby#51830](https://github.com/moby/moby/pull/51830) +- Fix image mounts failing with "file name too long" for long mount paths. [moby/moby#51829](https://github.com/moby/moby/pull/51829) +- Fix potential creation of orphaned overlay2 layers. [moby/moby#51826](https://github.com/moby/moby/pull/51826), [moby/moby#51824](https://github.com/moby/moby/pull/51824) + +### Packaging updates + +- Update BuildKit to [v0.26.3](https://github.com/moby/buildkit/releases/tag/v0.26.3). [moby/moby#51821](https://github.com/moby/moby/pull/51821) + +## 29.1.3 + +{{< release-date date="2025-12-12" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.1.3 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.1.3) +- [moby/moby, 29.1.3 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.1.3) + +### Bug fixes and enhancements + +- Add shell completion for `docker stack deploy --compose-file`. [docker/cli#6690](https://github.com/docker/cli/pull/6690) +- containerd image store: Fix a bug causing `docker build` to ignore the explicitly set `unpack` image exporter option. [moby/moby#51514](https://github.com/moby/moby/pull/51514) +- Fix `docker image ls` dangling image handling. [docker/cli#6704](https://github.com/docker/cli/pull/6704) +- Fix a bug that could cause the Engine to leave containers with autoremove set in 'dead' state on shutdown, and never reclaim them. [moby/moby#51693](https://github.com/moby/moby/pull/51693) +- Fix build on i386. 
[moby/moby#51528](https://github.com/moby/moby/pull/51528) +- Fix explicit graphdriver configuration (`"storage-driver"`) being treated as containerd snapshotter when prior graphdriver state exists. [moby/moby#51516](https://github.com/moby/moby/pull/51516) +- Fix potential creation of orphaned overlay2 layers. [moby/moby#51703](https://github.com/moby/moby/pull/51703) + +### Networking + +- Allow creation of a container with a specific IP address when its networks were not configured with a specific subnet. [moby/moby#51583](https://github.com/moby/moby/pull/51583) +- Don't crash when starting a container created via the API before upgrade to v29.1.2, with `PublishAll` and a nil `PortBindings` map. [moby/moby#51691](https://github.com/moby/moby/pull/51691) +- Fix a bug preventing DNS resolution of containers attached to non swarm-scoped networks once the node has joined a Swarm cluster. [moby/moby#51515](https://github.com/moby/moby/pull/51515) +- Fix an issue that caused daemon crash when using a remote network driver plugin. [moby/moby#51558](https://github.com/moby/moby/pull/51558) +- Fix an issue that could lead to an "endpoint not found" error when creating a container with multiple network connections, when one of the networks is non-internal but does not have its own external IP connectivity. [moby/moby#51538](https://github.com/moby/moby/pull/51538) +- Fix an issue that prevented rootless Docker from starting on a host with IPv6 disabled. [moby/moby#51543](https://github.com/moby/moby/pull/51543) +- Return an error when a container is created with a port-mapping pointing to container port 0. [moby/moby#51695](https://github.com/moby/moby/pull/51695) + +## 29.1.2 + +{{< release-date date="2025-12-02" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.1.2 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.1.2) +- [moby/moby, 29.1.2 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.1.2) + +### Security + +- Update Go runtime to [1.25.5](https://go.dev/doc/devel/release#go1.25.5). [moby/moby#51648](https://github.com/moby/moby/pull/51648), [docker/cli#6688](https://github.com/docker/cli/pull/6688) + - Fixes a potential DoS via excessive resource usage when formatting hostname validation errors [**CVE-2025-61729**](https://nvd.nist.gov/vuln/detail/CVE-2025-61729) + - Fixes incorrect enforcement of excluded subdomain constraints for wildcard SANs, which could allow improperly trusted certificates [**CVE-2025-61727**](https://nvd.nist.gov/vuln/detail/CVE-2025-22874) + +### Bug fixes and enhancements + +- containerd image store: Fix `docker image inspect` failing to return available image data in case where not all distributable blobs are available locally. [moby/moby#51629](https://github.com/moby/moby/pull/51629) +- dockerd-rootless-setuptool.sh: fix `nsenter: no namespace specified`. [moby/moby#51622](https://github.com/moby/moby/pull/51622) +- Fix `docker system df` showing `N/A` for shared size and unique size when using graph-drivers as storage. [moby/moby#51631](https://github.com/moby/moby/pull/51631) + +### Packaging updates + +- Update runc (in static binaries) to [v1.3.4](https://github.com/opencontainers/runc/releases/tag/v1.3.4). [moby/moby#51633](https://github.com/moby/moby/pull/51633) + +### Networking + +- Fix a bug preventing port mappings in rootless mode when slirp4netns is used. 
[moby/moby#51616](https://github.com/moby/moby/pull/51616) +- Prevent a crash when making an API request with `HostConfig.PublishAllPorts` set (`-P`), and no port bindings. [moby/moby#51621](https://github.com/moby/moby/pull/51621) + +## 29.1.1 + +{{< release-date date="2025-11-28" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.1.1 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.1.1) +- [moby/moby, 29.1.1 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.1.1) + +### Networking + +- Revert a PR breaking external DNS resolution on all custom bridge networks. [moby/moby#51615](https://github.com/moby/moby/pull/51615) + +## 29.1.0 + +{{< release-date date="2025-11-27" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.1.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.1.0) +- [moby/moby, 29.1.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.1.0) + +### Packaging updates + +- Update BuildKit to [v0.26.1](https://github.com/moby/buildkit/releases/tag/v0.26.1). [moby/moby#51551](https://github.com/moby/moby/pull/51551) +- Update containerd binary to v2.2.0 (static binaries). [moby/moby#51271](https://github.com/moby/moby/pull/51271) + +### Networking + +- Do not overwrite user-modified `/etc/resolv.conf` across container restarts. [moby/moby#51507](https://github.com/moby/moby/pull/51507) +- fix `--publish-all` / `-P` for Windows containers. [moby/moby#51586](https://github.com/moby/moby/pull/51586) +- Fix an issue that prevented container restart or network reconnection when gateway configuration failed during container stop or network disconnect. [moby/moby#51592](https://github.com/moby/moby/pull/51592) +- Windows containers: don't display an IPv6-mapped IPv4 address in port mappings. For example, `[::ffff:0.0.0.0]:8080->80/tcp` instead of `0.0.0.0:8080->80/tcp`. [moby/moby#51587](https://github.com/moby/moby/pull/51587) + +## 29.0.4 + +{{< release-date date="2025-11-24" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.0.4 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.0.4) +- [moby/moby, 29.0.4 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.0.4) + +### Bug fixes and enhancements + +- `docker image ls` no longer truncates the image names. [docker/cli#6675](https://github.com/docker/cli/pull/6675) + +### Networking + +- Allow creation of a container with a specific IP address when its networks were not configured with a specific subnet. [moby/moby#51583](https://github.com/moby/moby/pull/51583) + +## 29.0.3 + +{{< release-date date="2025-11-24" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.0.3 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.0.3) +- [moby/moby, 29.0.3 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.0.3) + +### Bug fixes and enhancements + +- `docker version --format json`: restore top-level `BuildTime` field to use RFC3339Nano format. [docker/cli#6668](https://github.com/docker/cli/pull/6668) +- Fix `docker image ls` ignoring a custom `imageFormat` from `docker.json`. 
[docker/cli#6667](https://github.com/docker/cli/pull/6667) + +### Networking + +- Fix an issue that caused daemon crash when using a remote network driver plugin. [moby/moby#51558](https://github.com/moby/moby/pull/51558) + +## 29.0.2 + +{{< release-date date="2025-11-17" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.0.2 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.0.2) +- [moby/moby, 29.0.2 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.0.2) + +### Networking + +- Fix an issue that could lead to an "endpoint not found" error when creating a container with multiple network connections, when one of the networks is non-internal but does not have its own external IP connectivity. [moby/moby#51538](https://github.com/moby/moby/pull/51538) +- Fix an issue that prevented rootless Docker from starting on a host with IPv6 disabled. [moby/moby#51543](https://github.com/moby/moby/pull/51543) + +## 29.0.1 + +{{< release-date date="2025-11-14" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.0.1 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.0.1) +- [moby/moby, 29.0.1 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.0.1) + +### Bug fixes and enhancements + +- `docker image ls` no longer truncates the name width when output is redirect (e.g. for `grep`). [docker/cli#6656](https://github.com/docker/cli/pull/6656) +- `docker image ls` now considers the `NO_COLOR` environment variable for choosing the colored output. [docker/cli#6654](https://github.com/docker/cli/pull/6654) +- containerd image store: Fix a bug causing `docker build` to ignore the explicitly set `unpack` image exporter option. [moby/moby#51514](https://github.com/moby/moby/pull/51514) +- Fix a bug causing `docker image ls --all` to not show untagged/dangling images. [docker/cli#6657](https://github.com/docker/cli/pull/6657) +- Fix build on i386. [moby/moby#51528](https://github.com/moby/moby/pull/51528) +- Fix explicit graphdriver configuration (`"storage-driver"`) being treated as containerd snapshotter when prior graphdriver state exists. [moby/moby#51516](https://github.com/moby/moby/pull/51516) +- Fix output format of the `ApiVersion` and `MinApiVersion` fields in `docker version --format=json` to align with previous versions. [docker/cli#6648](https://github.com/docker/cli/pull/6648) + +### Networking + +- Fix a bug preventing DNS resolution of containers attached to non swarm-scoped networks once the node has joined a Swarm cluster. [moby/moby#51515](https://github.com/moby/moby/pull/51515) + +## 29.0.0 + +{{< release-date date="2025-11-10" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 29.0.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A29.0.0) +- [moby/moby, 29.0.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A29.0.0) + +> [!CAUTION] +> This release includes several breaking changes and deprecations. Review the release notes carefully before upgrading. + +- Experimental support for nftables can now be enabled by setting Docker daemon's `firewall-backend` option to `nftables`. For more information, see [Docker Engine docs](https://docs.docker.com/engine/network/firewall-nftables/). 
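The release notes don't spell out the exact configuration syntax for this, but as a minimal sketch, assuming the `firewall-backend` option is set through `/etc/docker/daemon.json` (restart the daemon afterwards for it to take effect), opting into the experimental backend could look like this:

```json
{
  "firewall-backend": "nftables"
}
```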
+- containerd image store is now the default for **fresh installs**. This doesn't apply to daemons configured with `userns-remap` (see [moby#47377](https://github.com/moby/moby/issues/47377)). + +### Breaking Changes + +- The Go module `github.com/docker/docker` is deprecated in favor of `github.com/moby/moby/client` and `github.com/moby/moby/api`. The `github.com/moby/moby` module is considered an **internal implementation detail** - the only supported public modules are `client` and `api`. + Starting with v29, releases are tagged with the `docker-` prefix (e.g., `docker-v29.0.0`). **This only affects Go module users and package maintainers.** +- The daemon now requires API version `v1.44` or later (Docker v25.0+). +- Debian armhf (32-bit) packages now target ARMv7 CPUs and will not work on ARMv6 devices. +- Official Raspbian (32-bit) packages are no longer provided. Use Debian arm64 packages for 64-bit devices, or Debian armhf packages for 32-bit ARMv7 devices. +- **cgroup v1 is deprecated.** Support continues until at least May 2029, but migrate to cgroup v2 as soon as possible. See [moby#51111](https://github.com/moby/moby/issues/51111). +- Docker Content Trust was removed from the Docker CLI. Can be built as a separate plugin: https://github.com/docker/cli/blob/v29.0.0/cmd/docker-trust/main.go + +--- + +### New + +- `docker image load` and `docker image save` now supports multiple platform selection via `--platform` flag (e.g., `docker image load --platform linux/amd64,linux/arm64 -i image.tar`). [docker/cli#6126](https://github.com/docker/cli/pull/6126) +- `docker image ls` now uses the new view (like `--tree` but collapsed) by default. [docker/cli#6566](https://github.com/docker/cli/pull/6566) +- `docker run --runtime <...>` is now supported on Windows. [moby/moby#50546](https://github.com/moby/moby/pull/50546) +- `GET /containers/json` now includes a `Health` field describing container healthcheck status. [moby/moby#50281](https://github.com/moby/moby/pull/50281) +- Add `device` entitlement to builder configuration. [moby/moby#50386](https://github.com/moby/moby/pull/50386) +- Add support for `memory-swap` and `memory-swappiness` flags to `docker service create` and `docker service update` commands. [docker/cli#6619](https://github.com/docker/cli/pull/6619) +- Allow Docker CLI to set the `GODEBUG` environment variable when the key-value pair (`"GODEBUG":"..."`) exists inside the Docker context metadata. [docker/cli#6371](https://github.com/docker/cli/pull/6371) + +### Bug fixes and enhancements + +- `docker image ls --tree` now sorts images alphabetically by name instead of by creation date. [docker/cli#6595](https://github.com/docker/cli/pull/6595) +- `docker image ls` no longer shows untagged images by default if no `--all` flag is provided. [docker/cli#6574](https://github.com/docker/cli/pull/6574) +- `docker save`: Fixed inconsistent tar member timestamps when exporting images with the overlay2 storage driver. [moby/moby#51365](https://github.com/moby/moby/pull/51365) +- Add a new log option for fluentd log driver (`fluentd-read-timeout`), which enables specifying read timeouts for reading acks from fluentd connections. [moby/moby#50249](https://github.com/moby/moby/pull/50249) +- Add image name completion for `docker images`. [docker/cli#6452](https://github.com/docker/cli/pull/6452) +- Add shell completion for `docker inspect` if a `--type` is set. [docker/cli#6444](https://github.com/docker/cli/pull/6444) +- Add shell completion for `docker plugin` subcommands. 
[docker/cli#6445](https://github.com/docker/cli/pull/6445) +- api/types/container: make ContainerState, HealthStatus concrete types. [moby/moby#51439](https://github.com/moby/moby/pull/51439) +- containerd image store is temporarily not available when userns remapping is enabled as a workaround for [moby#47377](https://github.com/moby/moby/issues/47377). [moby/moby#51042](https://github.com/moby/moby/pull/51042) +- contrib: remove contrib/httpserver, which was only used for integration tests. [moby/moby#50654](https://github.com/moby/moby/pull/50654) +- daemon: improve validation of the `--dns` option and corresponding `"dns"` field in `daemon.json`. [moby/moby#50600](https://github.com/moby/moby/pull/50600) +- dockerd-rootless.sh: if slirp4netns is not installed, try using pasta (passt). [moby/moby#51149](https://github.com/moby/moby/pull/51149) +- Fix `--mount type=image` failure when mounting the same image multiple times to a different destinations. [moby/moby#50268](https://github.com/moby/moby/pull/50268) +- Fix `docker stats ` not exiting gracefully. [docker/cli#6582](https://github.com/docker/cli/pull/6582) +- Fix a bug preventing the API server from shutting down quickly when there's an open connection to the `/events` endpoint. [moby/moby#51448](https://github.com/moby/moby/pull/51448) +- Fix a bug where collecting container stats in "one-shot" mode would not include the container's ID and Name. [moby/moby#51302](https://github.com/moby/moby/pull/51302) +- Fix an issue where all new tasks in the Swarm could get stuck in the PENDING state forever after scaling up a service with placement preferences. [moby/moby#50202](https://github.com/moby/moby/pull/50202) +- Fix issue where custom meta-headers were not passed through when using the containerd image store. [moby/moby#51024](https://github.com/moby/moby/pull/51024) +- Fix requests not being logged when running the daemon with `--log-level=trace`. [moby/moby#50986](https://github.com/moby/moby/pull/50986) +- Fix Swarm services becoming unreachable from published ports after a firewalld reload. [moby/moby#50443](https://github.com/moby/moby/pull/50443) +- Improve errors when failing to connect to the API to provide more context to the user. [moby/moby#50285](https://github.com/moby/moby/pull/50285) +- Improve shell completion for `docker secret` and `docker config` subcommands. [docker/cli#6446](https://github.com/docker/cli/pull/6446) +- Prefer explicit device driver name over GPU capabilities when selecting the device driver with `docker run --gpus`. [moby/moby#50717](https://github.com/moby/moby/pull/50717) +- Update runc to [v1.3.3](https://github.com/opencontainers/runc/releases/tag/v1.3.3). [moby/moby#51393](https://github.com/moby/moby/pull/51393) +- Update SwarmKit internal TLS configuration to exclude known insecure cipher suites. [moby/moby#51139](https://github.com/moby/moby/pull/51139) +- Windows: Fix BuildKit creating containers which isolation mode is inconsistent with the daemon's config. [moby/moby#50942](https://github.com/moby/moby/pull/50942) + +### Packaging updates + +- client: remove legacy CBC cipher suites from client config. [moby/moby#50126](https://github.com/moby/moby/pull/50126) +- contrib: remove `editorconfig` as it was unmaintained. [moby/moby#50607](https://github.com/moby/moby/pull/50607) +- contrib: remove Dockerfile syntax highlighting files for `nano` and TextMate (`tmbundle`) as they were unmaintained and outdated. 
[moby/moby#50606](https://github.com/moby/moby/pull/50606)
- contrib: remove mkimage-xxx scripts as they were unmaintained and not tested. [moby/moby#50297](https://github.com/moby/moby/pull/50297)
- If Docker is downgraded to a version that does not have this support, the network becomes unusable and must be deleted and re-created. [moby/moby#50114](https://github.com/moby/moby/pull/50114)
- The Windows overlay network driver now supports the `--dns` option. [moby/moby#51229](https://github.com/moby/moby/pull/51229)
- Update BuildKit to [v0.25.2](https://github.com/moby/buildkit/releases/tag/v0.25.2). [moby/moby#51397](https://github.com/moby/moby/pull/51397)
- Update containerd to [v2.1.5](https://github.com/containerd/containerd/releases/tag/v2.1.5). [moby/moby#51409](https://github.com/moby/moby/pull/51409)

  containerd v2.1.5 now uses systemd's default `LimitNOFILE` for containers,
  changing the open file descriptor limit (`ulimit -n`) from `1048576` to
  `1024`. This extends a change introduced in Docker Engine v25.0 for build
  containers to all containers.

  This prevents programs that adjust behavior based on ulimits from consuming
  excessive memory when the limit is set to `infinity`. Containers now behave
  the same way as programs running on the host.

  If your workload needs a higher limit, use `--ulimit` with `docker run`, or
  set defaults in `/etc/docker/daemon.json`:

  ```json
  {
    "default-ulimits": {
      "nofile": {
        "Name": "nofile",
        "Soft": 1048576,
        "Hard": 1048576
      }
    }
  }
  ```

  For more information, see [moby#51485](https://github.com/moby/moby/issues/51485).
- Update Go runtime to [1.25.4](https://go.dev/doc/devel/release#go1.25.4). [moby/moby#51418](https://github.com/moby/moby/pull/51418), [docker/cli#6632](https://github.com/docker/cli/pull/6632)
- Users can request a specific prefix size for networks allocated from the default pools by using the unspecified address, for example `--subnet 0.0.0.0/24 --subnet ::/96`. [moby/moby#50114](https://github.com/moby/moby/pull/50114)

### Networking

- Add daemon option `--bridge-accept-fwmark`. Packets with this firewall mark will be accepted by bridge networks, overriding Docker's iptables or nftables "drop" rules. [moby/moby#50476](https://github.com/moby/moby/pull/50476)
- api/types/system: deprecated top-level `DiskUsage` fields in favor of type-specific fields. [moby/moby#51235](https://github.com/moby/moby/pull/51235)
- Ensure bridge devices are removed when bridge network creation fails. [moby/moby#51147](https://github.com/moby/moby/pull/51147)
- Ensure that Windows NAT networks are recreated with their original labels when the Engine restarts. [moby/moby#50447](https://github.com/moby/moby/pull/50447)
- Environment variables set on a container using legacy links are deprecated and aren't added automatically anymore. [moby/moby#50719](https://github.com/moby/moby/pull/50719)
  - The daemon can be started with `DOCKER_KEEP_DEPRECATED_LEGACY_LINKS_ENV_VARS=1` to get them back (see the example below).
  - Users are encouraged to stop relying on these variables; the escape hatch will be removed in a later version.
- Fix a bug in NetworkDB which would sometimes cause entries to get stuck in a deleted state on some of the nodes, leading to connectivity issues between containers on overlay networks. [moby/moby#50342](https://github.com/moby/moby/pull/50342)
- Fix a bug that could cause the Engine and another host process to bind the same UDP port. [moby/moby#50669](https://github.com/moby/moby/pull/50669)
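For the legacy-links escape hatch mentioned above, one way to set the environment variable on a systemd-managed daemon is a drop-in unit. This is a general systemd pattern rather than something prescribed by these notes, and the drop-in file name is arbitrary:

```console
$ sudo mkdir -p /etc/systemd/system/docker.service.d
$ printf '[Service]\nEnvironment=DOCKER_KEEP_DEPRECATED_LEGACY_LINKS_ENV_VARS=1\n' \
    | sudo tee /etc/systemd/system/docker.service.d/legacy-links.conf
$ sudo systemctl daemon-reload
$ sudo systemctl restart docker
```

Treat this as a temporary measure; as noted above, the escape hatch will be removed in a later version.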
- Fix a deadlock that could happen if a firewalld reload was processed while the bridge networking driver was starting up, creating or deleting a network, or creating port mappings. [moby/moby#50620](https://github.com/moby/moby/pull/50620)
- Fix an issue preventing container startup or selection of its network gateway when IPv6 is only disabled on a specific interface. [moby/moby#48971](https://github.com/moby/moby/pull/48971)
- For Linux, `docker info` now reports the firewall backend if available. [docker/cli#6191](https://github.com/docker/cli/pull/6191)
- Greatly improve the reliability of overlay networking and the Swarm routing mesh. [moby/moby#50393](https://github.com/moby/moby/pull/50393)
- Improve the convergence rate of NetworkDB, part of the management plane for overlay networking, after bursts of updates. [moby/moby#50193](https://github.com/moby/moby/pull/50193)
- Improve the reliability of the overlay network driver. [moby/moby#50260](https://github.com/moby/moby/pull/50260)
- Improve error handling when connecting a container to a network. [moby/moby#50945](https://github.com/moby/moby/pull/50945)
- macvlan and IPvlan-l2 networks: no default gateway is configured unless a `--gateway` is explicitly included in the IPAM configuration. This addresses an issue which could cause container startup to fail in networks with IPv6 auto-configuration enabled. [moby/moby#50929](https://github.com/moby/moby/pull/50929)
- nftables: Docker does not enable IP forwarding on the host. If forwarding is needed by a bridge network but not enabled, daemon startup or network creation fails with an error. Either enable forwarding and ensure firewall rules are in place to prevent unwanted forwarding between non-Docker interfaces, or use the daemon option `--ip-forward=false` to disable the check; in that case some bridge network functionality, including port forwarding, may not work. See [Engine Docs](https://docs.docker.com/engine/network/firewall-nftables) for more information about migrating from iptables to nftables. [moby/moby#50646](https://github.com/moby/moby/pull/50646)
- On daemon startup, restart containers that share their network stacks before containers that need those stacks. [moby/moby#50327](https://github.com/moby/moby/pull/50327)
- Published ports are now always accessible in networks with gateway mode "routed". Previously, rules to open those ports were only added when the routed-mode network was selected as the container's default gateway. [moby/moby#50140](https://github.com/moby/moby/pull/50140)
- Since 28.0.0, an `iptables` mangle rule for checksumming SCTP was only added if the environment variable `DOCKER_IPTABLES_SCTP_CHECKSUM=1` was set. The rule has now been removed, and the environment variable has no effect. [moby/moby#50539](https://github.com/moby/moby/pull/50539)
- The iptables rules for bridge networks have been updated, including removal of the `DOCKER-ISOLATION-STAGE-1` and `DOCKER-ISOLATION-STAGE-2` chains. With these changes: [moby/moby#49981](https://github.com/moby/moby/pull/49981)
  - Containers can now access ports published to host addresses by containers in other networks when the userland-proxy is not running.
  - Containers can now access ports on container addresses in other networks that have gateway mode "nat-unprotected".
- When dynamically linked, the Docker daemon now depends on libnftables.
[moby/moby#51033](https://github.com/moby/moby/pull/51033) +- Windows: `network inspect`: the HNS network name is now reported in option `com.docker.network.windowsshim.networkname` rather than the Docker network name, which was only reported after a daemon restart. [moby/moby#50961](https://github.com/moby/moby/pull/50961) +- Windows: when restoring networks on daemon restart, preserve their association with non-default IPAM drivers. [moby/moby#50649](https://github.com/moby/moby/pull/50649) + +### API + +- `events` API now reports content-type as `application/x-ndjson` for newline-delimited JSON event stream. [moby/moby#50953](https://github.com/moby/moby/pull/50953) +- `GET /images/{name}/get` and `POST /images/load` now accept multiple `platform` query parameters, allowing export and load of images for multiple platforms. [moby/moby#50166](https://github.com/moby/moby/pull/50166) +- `GET /images/{name}/json` now omits the following fields if their value is empty: `Parent`, `Comment`, `DockerVersion`, `Author`. [moby/moby#51072](https://github.com/moby/moby/pull/51072) +- `GET /images/{name}/json`: omit empty `Config` fields when not set. [moby/moby#50915](https://github.com/moby/moby/pull/50915) +- `POST /images/{name:}/push`: remove compatibility with API v1.4 auth-config in body. [moby/moby#50371](https://github.com/moby/moby/pull/50371) +- Add support for memory swappiness in Swarm services. [moby/moby#51114](https://github.com/moby/moby/pull/51114) + - `GET /services` now returns `SwapBytes` and `MemorySwappiness` fields as part of the `Resource` requirements + - `GET /services/{id}` now returns `SwapBytes` and `MemorySwappiness` fields as part of the `Resource` requirements + - `POST /services/create` now accepts `SwapBytes` and `MemorySwappiness` fields as part of the `Resource` requirements + - `POST /services/{id}/update` now accepts `SwapBytes` and `MemorySwappiness` fields as part of the `Resource` requirements + - `GET /tasks` now returns `SwapBytes` and `MemorySwappiness` fields as part of the `Resource` requirements + - `GET /tasks/{id}` now returns `SwapBytes` and `MemorySwappiness` fields as part of the `Resource` requirements +- api/types/build: move `CachePruneOptions` type to `client.BuildCachePruneOptions`. [moby/moby#50772](https://github.com/moby/moby/pull/50772) +- api/types/checkpoint: move checkpoint options to client module. [moby/moby#50905](https://github.com/moby/moby/pull/50905) +- api/types/container: `OnBuild` will now be omitted if its value is empty or zero. [moby/moby#51154](https://github.com/moby/moby/pull/51154) +- api/types/container: make the container config `MacAddress` obsolete for v1.52 and onwards. Use network endpoint settings instead. [moby/moby#51189](https://github.com/moby/moby/pull/51189) +- api/types/container: move `ResizeOptions` type to `ContainerResizeOptions` in the client. [moby/moby#50773](https://github.com/moby/moby/pull/50773) +- api/types/events: move `ListOptions` type to the client `EventsListOptions`. [moby/moby#50774](https://github.com/moby/moby/pull/50774) +- api/types/image: move image options out to the client. [moby/moby#50776](https://github.com/moby/moby/pull/50776) +- api/types/network: move `CreateOptions`, `ConnectOptions` and `DisconnectOptions` to the client module. [moby/moby#50817](https://github.com/moby/moby/pull/50817) +- api/types/network: move the `ListOptions` and `InspectOptions` types to the client. 
[moby/moby#50786](https://github.com/moby/moby/pull/50786) +- api/types/plugin: change `ListResponse` to a non-pointer slice. [moby/moby#51440](https://github.com/moby/moby/pull/51440) +- api/types/plugin: remove deprecated `Config.DockerVersion`. [moby/moby#51458](https://github.com/moby/moby/pull/51458) +- api/types/registry: move `SearchOptions` to `ImageSearchOptions` in the client. [moby/moby#50787](https://github.com/moby/moby/pull/50787) +- api/types/registry: moved `ServiceConfig` legacy field marshaling support into daemon backend. [moby/moby#50826](https://github.com/moby/moby/pull/50826) +- api/types/registry: moved encode/decode auth config functions into reference utility package. [moby/moby#50785](https://github.com/moby/moby/pull/50785) +- api/types/storage: add `Storage` type and integrate in container inspect. [moby/moby#50857](https://github.com/moby/moby/pull/50857) +- api/types/swarm: deprecated and dropped support for `PortConfigProtocol`; use `network.IPProtocol` instead. [moby/moby#51094](https://github.com/moby/moby/pull/51094) +- api/types/swarm: move option types to the client module. [moby/moby#50794](https://github.com/moby/moby/pull/50794) +- api/types/swarm: move the `SecretListOptions` type to the client module. [moby/moby#50816](https://github.com/moby/moby/pull/50816) +- api/types/system: move `DiskUsageOptions` to the client. [moby/moby#50788](https://github.com/moby/moby/pull/50788) +- api/types/system: move `SecurityOpt` and `DecodeSecurityOptions` to client module. [moby/moby#50825](https://github.com/moby/moby/pull/50825) +- api/types/volume: change ListResponse.Volumes to a non-pointer slice. [moby/moby#51454](https://github.com/moby/moby/pull/51454) +- api/types/volume: move the `ListOptions` type to the client module. [moby/moby#50789](https://github.com/moby/moby/pull/50789) +- api/types/volume: moved `UpdateOptions` into client module. [moby/moby#51205](https://github.com/moby/moby/pull/51205) +- api/types: daemon: move the disk usage structs to the backend server. [moby/moby#50764](https://github.com/moby/moby/pull/50764) +- api: make `GraphDriver` field in `image.InspectResponse` optional. This field will continue to be emitted when using the legacy graph drivers and will be omitted when using the containerd image backend. [moby/moby#50893](https://github.com/moby/moby/pull/50893) +- api: redefine container network port types. [moby/moby#50710](https://github.com/moby/moby/pull/50710) +- client: PluginListResult: change Items field to a non-pointer slice. [moby/moby#51440](https://github.com/moby/moby/pull/51440) +- Inspecting networks with API v1.52 and newer provides statistics about IPAM allocations for the subnets assigned to the network. [moby/moby#50917](https://github.com/moby/moby/pull/50917) +- MAC address fields are represented as byte slices compatible with the standard library net.HardwareAddr type instead of strings. [moby/moby#51355](https://github.com/moby/moby/pull/51355) +- Swagger definitions for `NetworkSummary` and `NetworkInspect` have been added to the Swagger spec describing the Engine API. [moby/moby#50855](https://github.com/moby/moby/pull/50855) +- Update API version to 1.52. [moby/moby#50418](https://github.com/moby/moby/pull/50418) + +### Go SDK + +- `api/pkg/progress` and `api/pkg/streamformatter` have been removed. [moby/moby#51153](https://github.com/moby/moby/pull/51153) +- `api/types/registry`: `EncodeAuthConfig`: use empty string for zero value. 
[moby/moby#50426](https://github.com/moby/moby/pull/50426)
- `api/types/versions` has moved to the client and daemon. [moby/moby#51284](https://github.com/moby/moby/pull/51284)
- `client.ConfigCreate`, `client.ConfigList`, `client.ConfigInspectWithRaw`, `client.ConfigUpdate`, and `client.ConfigRemove` methods now accept option structs instead of positional arguments, and return dedicated result structs. [moby/moby#51078](https://github.com/moby/moby/pull/51078)
- `client.ImageBuild`, `client.BuildCancel`, `client.ImageList`, `client.ImageRemove`, `client.ImageTag`, and `client.ImageSearch` methods now accept option structs instead of positional arguments, and return dedicated result structs. [moby/moby#51227](https://github.com/moby/moby/pull/51227)
- `client`: `ContainerExec...` methods were renamed to `Exec...`. [moby/moby#51262](https://github.com/moby/moby/pull/51262)
- `client`: Wrap return values of `ImageInspect`, `ImageHistory`, `ImageLoad` and `ImageSave` in a struct. [moby/moby#51236](https://github.com/moby/moby/pull/51236)
- `ImagePull` now returns an object with a `JSONMessages` method returning an iterator over the message objects. [moby/moby#50935](https://github.com/moby/moby/pull/50935)
- `ImagePush` now returns an object with a `JSONMessages` method returning an iterator over the message objects. [moby/moby#51148](https://github.com/moby/moby/pull/51148)
- api/types/container: move `StatsResponseReader` to `client` package. [moby/moby#50521](https://github.com/moby/moby/pull/50521)
- api/types/container: move container options to client. [moby/moby#50897](https://github.com/moby/moby/pull/50897)
- api/types/container: rename `Port` to `PortSummary`. [moby/moby#50711](https://github.com/moby/moby/pull/50711)
- api/types/container: StatsResponse: add `OSType` field. [moby/moby#51305](https://github.com/moby/moby/pull/51305)
- api/types: move `ErrorResponse` to `common/ErrorResponse`. [moby/moby#50632](https://github.com/moby/moby/pull/50632)
- api: remove unused `DefaultVersion`, `MinSupportedAPIVersion` consts. [moby/moby#50587](https://github.com/moby/moby/pull/50587)
- cli/command: add `WithUserAgent` option. [docker/cli#4574](https://github.com/docker/cli/pull/4574)
- client: `ContainerCommitOptions`: remove `Pause` field in favor of `NoPause`. [moby/moby#51019](https://github.com/moby/moby/pull/51019)
- client: add `DefaultAPIVersion` const, which defines the default (and maximum) API version supported by the client. [moby/moby#50433](https://github.com/moby/moby/pull/50433)
- client: add `ExecAPIClient` interface for exec methods provided by the client. [moby/moby#50997](https://github.com/moby/moby/pull/50997)
- client: Client.PluginList: add an options struct. [moby/moby#51207](https://github.com/moby/moby/pull/51207)
- client: ContainersPrune: rewrite to use option and result structs. [moby/moby#51200](https://github.com/moby/moby/pull/51200)
- client: ImagesPrune: rewrite to use option and result structs. [moby/moby#51200](https://github.com/moby/moby/pull/51200)
- client: NetworksPrune: rewrite to use option and result structs. [moby/moby#51200](https://github.com/moby/moby/pull/51200)
- client: remove `client.ContainerStatsResult.OSType` field. [moby/moby#51305](https://github.com/moby/moby/pull/51305)
- client: VolumesPrune: rewrite to use option and result structs. [moby/moby#51200](https://github.com/moby/moby/pull/51200)
- daemon/config: add `DefaultAPIVersion` const, which defines the default (and maximum) API version supported by the daemon. [moby/moby#50436](https://github.com/moby/moby/pull/50436)
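For Go module users, a minimal sketch of pulling in the new modules described under Breaking changes. The `@latest` selectors are an assumption for illustration, since submodule version tags may differ from the `docker-v29.0.0` release tag:

```console
$ go get github.com/moby/moby/client@latest
$ go get github.com/moby/moby/api@latest
```

Existing imports of `github.com/docker/docker/...` packages then need to be updated to the corresponding `github.com/moby/moby/...` packages, alongside the type and method changes listed in this section.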
- Fix data race in `ContainerExecStart`, `ContainerList`, and `Events`. [moby/moby#50448](https://github.com/moby/moby/pull/50448)
- IP addresses and subnets are now of type `netip.Addr` and `netip.Prefix`, respectively. [moby/moby#50956](https://github.com/moby/moby/pull/50956)
- Remove structs `NetworkSettingsBase` and `DefaultNetworkSettings`. Fields in `NetworkSettingsBase` that were not deprecated are now directly in `NetworkSettings`. [moby/moby#50846](https://github.com/moby/moby/pull/50846)
- The client now uses its own `client.Filters` type for filtering API requests, with a more ergonomic interface. Users of the `github.com/docker/docker/api/types/filters` package will need to refactor when they upgrade to the v29 client. [moby/moby#51115](https://github.com/moby/moby/pull/51115)
- Types `"github.com/moby/moby/api/types/network".Summary` and `"github.com/moby/moby/api/types/network".Inspect` are no longer aliases, and most of their fields have been moved into an embedded struct. Engine API clients may require some source-level changes when migrating to the new `github.com/moby/moby/api` module. [moby/moby#50878](https://github.com/moby/moby/pull/50878)
- Update minimum Go version to 1.24. [docker/cli#6624](https://github.com/docker/cli/pull/6624)

### Deprecations

- `client/pkg/jsonmessage`: remove deprecated `ProgressMessage`, `ErrorMessage`, `DisplayJSONMessagesToStream` and `Stream` interface. [moby/moby#49264](https://github.com/moby/moby/pull/49264)
- `GET /events` no longer includes the deprecated `status`, `id`, and `from` fields. These fields were removed in API v1.22 but were still included in the response. They are now omitted when using API v1.52 or later. [moby/moby#50832](https://github.com/moby/moby/pull/50832)
- api/types/network: CreateRequest: remove deprecated CheckDuplicate field. [moby/moby#50998](https://github.com/moby/moby/pull/50998)
- api/types/plugin: deprecate `Config.DockerVersion` field. [moby/moby#51109](https://github.com/moby/moby/pull/51109)
- api/types/registry: remove deprecated AuthConfig.Email field. [moby/moby#51059](https://github.com/moby/moby/pull/51059)
- api/types/strslice: deprecate StrSlice in favor of using a regular `[]string`. [moby/moby#50292](https://github.com/moby/moby/pull/50292)
- api/types/system: remove deprecated `DiskUsage.BuilderSize`. [moby/moby#51180](https://github.com/moby/moby/pull/51180)
- api/types: move plugin types to api/types/plugin. [moby/moby#48114](https://github.com/moby/moby/pull/48114)
- API: The Engine automatically backfilled empty `PortBindings` lists with a `PortBinding` containing an empty `HostIP` and `HostPort` when starting a container. This behavior is deprecated in API v1.52 and will be dropped in API v1.53. [moby/moby#50874](https://github.com/moby/moby/pull/50874)
- build: remove DCT support for classic builder. [docker/cli#6195](https://github.com/docker/cli/pull/6195)
- cli/command: Remove deprecated `ResolveDefaultContext`. [docker/cli#6555](https://github.com/docker/cli/pull/6555)
- client: ImageBuildResponse: remove OSType field. [moby/moby#50995](https://github.com/moby/moby/pull/50995)
- client: Remove `ImageCreate` method; use `ImagePull` or `ImageImport` instead. [moby/moby#51366](https://github.com/moby/moby/pull/51366)
- client: remove deprecated `ImageListOptions.ContainerCount`. 
[moby/moby#51006](https://github.com/moby/moby/pull/51006) +- client: remove support for negotiating API version < v1.44 (docker 25.0). [moby/moby#51119](https://github.com/moby/moby/pull/51119) +- client: remove unused `Client.HTTPClient()` method. [moby/moby#51011](https://github.com/moby/moby/pull/51011) +- daemon/graphdriver: remove deprecated `GetDriver()`. [moby/moby#50377](https://github.com/moby/moby/pull/50377) +- daemon: raise minimum API version to v1.44. [moby/moby#51186](https://github.com/moby/moby/pull/51186) +- Deprecate the `--pause` flag on `docker commit` in favor of `--no-pause`. [docker/cli#6460](https://github.com/docker/cli/pull/6460) +- Deprecate cgroup v1. [moby/moby#51360](https://github.com/moby/moby/pull/51360), [docker/cli#6598](https://github.com/docker/cli/pull/6598) +- Go SDK: `cli-plugins/manager`: deprecate metadata aliases in favor of their equivalent in `cli-plugins/manager/metadata`. [docker/cli#6237](https://github.com/docker/cli/pull/6237) +- Go SDK: `cli-plugins/manager`: remove `Candidate` interface, which was only for internal use. [docker/cli#6237](https://github.com/docker/cli/pull/6237) +- Go SDK: `cli-plugins/manager`: remove `NewPluginError` function, which was only for internal use. [docker/cli#6237](https://github.com/docker/cli/pull/6237) +- Go SDK: `cli-plugins/manager`: remove deprecated `ResourceAttributesEnvvar` const. [docker/cli#6237](https://github.com/docker/cli/pull/6237) +- Go SDK: `cli/command`: remove the `ErrPromptTerminated`, `DisableInputEcho`, `PromptForInput`, and `PromptForConfirmation` utilities. These utilities were for internal use and are no longer used. [docker/cli#6243](https://github.com/docker/cli/pull/6243) +- Go SDK: `cli/registry/client`: remove deprecated `RepoNameForReference`. [docker/cli#6206](https://github.com/docker/cli/pull/6206) +- Go SDK: api/types/build: remove deprecated BuildCache.Parent field. [moby/moby#51185](https://github.com/moby/moby/pull/51185) +- Go SDK: api/types/container: remove deprecated `ContainerTopOKBody` alias. [moby/moby#50400](https://github.com/moby/moby/pull/50400) +- Go SDK: api/types/container: remove deprecated `ContainerUpdateOKBody` alias. [moby/moby#50400](https://github.com/moby/moby/pull/50400) +- Go SDK: api/types/container: remove deprecated `Stats` type. [moby/moby#50492](https://github.com/moby/moby/pull/50492) +- Go SDK: api/types/filters: remove deprecated `ToParamWithVersion`. [moby/moby#50561](https://github.com/moby/moby/pull/50561) +- Go SDK: api/types/image: `InspectResponse`: remove deprecated `VirtualSize`, `Container`, `ContainerConfig`, `Parent`, and `DockerVersion` fields. [moby/moby#51103](https://github.com/moby/moby/pull/51103) +- Go SDK: api/types/image: remove deprecated Summary.VirtualSize field. [moby/moby#51190](https://github.com/moby/moby/pull/51190) +- Go SDK: api/types/registry: remove deprecated `ServiceConfig.AllowNondistributableArtifactsCIDRs` and `ServiceConfig.AllowNondistributableArtifactsHostnames` fields. [moby/moby#50375](https://github.com/moby/moby/pull/50375) +- Go SDK: api/types/swarm: remove deprecated ServiceSpec.Networks field. [moby/moby#51184](https://github.com/moby/moby/pull/51184) +- GO SDK: api/types/system: remove deprecated `Commit.Expected` field. [moby/moby#51127](https://github.com/moby/moby/pull/51127) +- Go SDK: api/types: remove deprecated aliases. [moby/moby#50452](https://github.com/moby/moby/pull/50452) +- Go SDK: api: deprecate `NoBaseImageSpecifier` const. 
This const is no longer used and will be removed in the next release. [moby/moby#50437](https://github.com/moby/moby/pull/50437) +- Go SDK: api: remove `NoBaseImageSpecifier`. [moby/moby#50574](https://github.com/moby/moby/pull/50574) +- Go SDK: cli/command/builder: remove `CachePrune()`, which was no longer used. [docker/cli#6236](https://github.com/docker/cli/pull/6236) +- Go SDK: cli/command/builder: remove `NewBuilderCommand` and `NewBakeStubCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/checkpoint: remove `NewCheckpointCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/checkpoint: remove deprecated `NewFormat`, `FormatWrite`. [docker/cli#6339](https://github.com/docker/cli/pull/6339) +- Go SDK: cli/command/completion: remove deprecated `NoComplete`. [docker/cli#6408](https://github.com/docker/cli/pull/6408) +- Go SDK: cli/command/config: remove `NewConfigCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/config: remove deprecated `NewFormat`, `FormatWrite`, `InspectFormatWrite`. [docker/cli#6339](https://github.com/docker/cli/pull/6339) +- Go SDK: cli/command/config: remove deprecated `RunConfigCreate`, `CreateOptions`, `RunConfigInspect`, `InspectOptions`, `RunConfigList`, `ListOptions`, `RunConfigRemove`, and `RemoveOptions`. [docker/cli#6370](https://github.com/docker/cli/pull/6370) +- Go SDK: cli/command/container: deprecate `NewDiffFormat`, `DiffFormatWrite`. These functions were only used internally and will be removed in the next release. [docker/cli#6187](https://github.com/docker/cli/pull/6187) +- Go SDK: cli/command/container: remove `NewBuildCommand`, `NewPullCommand`, `NewPushCommand`, `NewImagesCommand`, `NewImageCommand`, `NewHistoryCommand`, `NewImportCommand`, `NewLoadCommand`, `NewRemoveCommand`, `NewSaveCommand`, `NewTagCommand`, `NewPruneCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/container: remove `NewRunCommand`, `NewExecCommand`, `NewPsCommand`, `NewContainerCommand`, `NewAttachCommand`, `NewCommitCommand`, `NewCopyCommand`, `NewCreateCommand`, `NewDiffCommand`, `NewExportCommand`, `NewKillCommand`, `NewLogsCommand`, `NewPauseCommand`, `NewPortCommand`, `NewRenameCommand`, `NewRestartCommand`, `NewRmCommand`, `NewStartCommand`, `NewStatsCommand`, `NewStopCommand`, `NewTopCommand`, `NewUnpauseCommand`, `NewUpdateCommand`, `NewWaitCommand`, `NewPruneCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/container: remove `RunPrune()`, which was no longer used. [docker/cli#6236](https://github.com/docker/cli/pull/6236) +- Go SDK: cli/command/container: remove deprecated `NewDiffFormat`, `DiffFormatWrite`. [docker/cli#6339](https://github.com/docker/cli/pull/6339) +- Go SDK: cli/command/context: remove `NewContextCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/context: remove deprecated `RunCreate` and `CreateOptions`. [docker/cli#6407](https://github.com/docker/cli/pull/6407) +- Go SDK: cli/command/context: remove deprecated `RunExport` and `ExportOptions`. [docker/cli#6407](https://github.com/docker/cli/pull/6407) +- Go SDK: cli/command/context: remove deprecated `RunImport`. [docker/cli#6407](https://github.com/docker/cli/pull/6407) +- Go SDK: cli/command/context: remove deprecated `RunRemove` and `RemoveOptions`. 
[docker/cli#6407](https://github.com/docker/cli/pull/6407) +- Go SDK: cli/command/context: remove deprecated `RunUpdate` and `UpdateOptions`. [docker/cli#6407](https://github.com/docker/cli/pull/6407) +- Go SDK: cli/command/context: remove deprecated `RunUse`. [docker/cli#6407](https://github.com/docker/cli/pull/6407) +- Go SDK: cli/command/formatter/swarm: remove deprecated `GetStacks` function. [docker/cli#6406](https://github.com/docker/cli/pull/6406) +- Go SDK: cli/command/image/build: deprecate `DefaultDockerfileName`, `DetectArchiveReader`, `WriteTempDockerfile`, `ResolveAndValidateContextPath`. These utilities were only used internally and will be removed in the next release. [docker/cli#6561](https://github.com/docker/cli/pull/6561) +- Go SDK: cli/command/image: remove `RunPrune()`, which was no longer used. [docker/cli#6236](https://github.com/docker/cli/pull/6236) +- Go SDK: cli/command/image: remove deprecated `AuthResolver` utility. [docker/cli#6373](https://github.com/docker/cli/pull/6373) +- Go SDK: cli/command/image: remove deprecated `NewHistoryFormat`, `HistoryWrite`. [docker/cli#6339](https://github.com/docker/cli/pull/6339), [docker/cli#6339](https://github.com/docker/cli/pull/6339) +- Go SDK: cli/command/manifest: remove `NewManifestCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/network: remove `NewNetworkCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/network: remove `RunPrune()`, which was no longer used. [docker/cli#6236](https://github.com/docker/cli/pull/6236) +- Go SDK: cli/command/network: remove deprecated `NewFormat`, `FormatWrite`. [docker/cli#6339](https://github.com/docker/cli/pull/6339) +- Go SDK: cli/command/node: remove `NewNodeCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/node: remove deprecated `NewFormat`, `FormatWrite`, `InspectFormatWrite`. [docker/cli#6339](https://github.com/docker/cli/pull/6339) +- Go SDK: cli/command/plugin: remove `NewPluginCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/plugin: remove deprecated `NewFormat`, `FormatWrite`. [docker/cli#6339](https://github.com/docker/cli/pull/6339) +- Go SDK: cli/command/registry: remove `NewLoginCommand`, `NewLogoutCommand`, `NewSearchCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/registry: remove deprecated `NewSearchFormat`, `SearchWrite`. [docker/cli#6339](https://github.com/docker/cli/pull/6339) +- Go SDK: cli/command/registry: remove deprecated `OauthLoginEscapeHatchEnvVar` const. [docker/cli#6463](https://github.com/docker/cli/pull/6463) +- Go SDK: cli/command/secret: remove `NewSecretCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/secret: remove deprecated `NewFormat`, `FormatWrite`, `InspectFormatWrite`. [docker/cli#6339](https://github.com/docker/cli/pull/6339) +- Go SDK: cli/command/service: remove `NewServiceCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/service: remove deprecated `NewFormat`, `InspectFormatWrite`. [docker/cli#6339](https://github.com/docker/cli/pull/6339) +- Go SDK: cli/command/stack/swarm: remove deprecated RunPS and options.PS. [docker/cli#6398](https://github.com/docker/cli/pull/6398) +- Go SDK: cli/command/stack: remove `NewStackCommand`. 
[docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/stack: remove deprecated RunList and options.List. [docker/cli#6398](https://github.com/docker/cli/pull/6398) +- Go SDK: cli/command/stack: remove deprecated RunServices and swarm.GetServices. [docker/cli#6398](https://github.com/docker/cli/pull/6398) +- Go SDK: cli/command/swarm: remove `NewSwarmCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/system: remove `NewVersionCommand`, `NewInfoCommand`, `NewSystemCommand`, `NewEventsCommand`, `NewInspectCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/task: remove deprecated `NewTaskFormat`, `FormatWrite`. [docker/cli#6339](https://github.com/docker/cli/pull/6339) +- Go SDK: cli/command/trust: remove `NewTrustCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/trust: remove deprecated `NewPruneCommand`. [docker/cli#6344](https://github.com/docker/cli/pull/6344) +- Go SDK: cli/command/trust: remove deprecated `SignedTagInfo`, `SignerInfo`, `NewTrustTagFormat`, `NewSignerInfoFormat`, `TagWrite`, `SignerInfoWrite`. [docker/cli#6339](https://github.com/docker/cli/pull/6339) +- Go SDK: cli/command/volume: remove `NewVolumeCommand`, `NewPruneCommand`. [docker/cli#6335](https://github.com/docker/cli/pull/6335) +- Go SDK: cli/command/volume: remove `RunPrune()`, which was no longer used. [docker/cli#6236](https://github.com/docker/cli/pull/6236) +- Go SDK: cli/command: remove `AddTrustSigningFlags`, `AddTrustVerificationFlags`, and `AddPlatformFlag` utilities, which were only used internally. [docker/cli#6244](https://github.com/docker/cli/pull/6244) +- Go SDK: cli/command: remove deprecated `DockerCli.Apply`. [docker/cli#6503](https://github.com/docker/cli/pull/6503) +- Go SDK: cli/command: remove deprecated `DockerCli.ContentTrustEnabled`. [docker/cli#6502](https://github.com/docker/cli/pull/6502) +- Go SDK: cli/command: remove deprecated `DockerCli.DefaultVersion`. [docker/cli#6502](https://github.com/docker/cli/pull/6502) +- Go SDK: cli/command: remove deprecated `RegistryAuthenticationPrivilegedFunc`. [docker/cli#6349](https://github.com/docker/cli/pull/6349) +- Go SDK: cli/command: remove deprecated `WithContentTrustFromEnv`, `WithContentTrust` options. [docker/cli#6502](https://github.com/docker/cli/pull/6502) +- Go SDK: cli/config/configfile: remove deprecated `ConfigFile.Experimental` field. [docker/cli#6464](https://github.com/docker/cli/pull/6464) +- Go SDK: cli/config/types: remove deprecated `AuthConfig.Email` field. [docker/cli#6515](https://github.com/docker/cli/pull/6515) +- Go SDK: cli/manifest/store: remove deprecated `IsNotFound`. [docker/cli#6523](https://github.com/docker/cli/pull/6523) +- Go SDK: cli: remove deprecated `VisitAll`, `DisableFlagsInUseLine` utilities. [docker/cli#6296](https://github.com/docker/cli/pull/6296) +- Go SDK: client: remove `APIClient.ImageInspectWithRaw` from the `APIClient` interface. [moby/moby#50485](https://github.com/moby/moby/pull/50485) +- Go SDK: client: remove `ImageAPIClient.ImageInspectWithRaw` from the `ImageAPIClient` interface. [moby/moby#50485](https://github.com/moby/moby/pull/50485) +- Go SDK: client: remove `ImageAPIClientDeprecated.ImageInspectWithRaw` from the `ImageAPIClientDeprecated`. [moby/moby#50485](https://github.com/moby/moby/pull/50485) +- Go SDK: client: remove deprecated `ErrorConnectionFailed` and `IsErrNotFound` functions. 
[moby/moby#50485](https://github.com/moby/moby/pull/50485) +- Go SDK: client: remove deprecated `NewClient` and `NewEnvClient` functions. [moby/moby#50485](https://github.com/moby/moby/pull/50485) +- Go SDK: client: remove the `CommonAPIClient` interface. [moby/moby#50485](https://github.com/moby/moby/pull/50485) +- Go SDK: client: remove the `ImageAPIClientDeprecated` interface. [moby/moby#50485](https://github.com/moby/moby/pull/50485) +- Go SDK: client: remove the deprecated `Client.ImageInspectWithRaw` method. [moby/moby#50485](https://github.com/moby/moby/pull/50485) +- Go SDK: container: remove deprecated `IsValidHealthString`. [moby/moby#50378](https://github.com/moby/moby/pull/50378) +- Go SDK: container: remove deprecated `IsValidStateString`. [moby/moby#50378](https://github.com/moby/moby/pull/50378) +- Go SDK: container: remove deprecated `StateStatus`, `WaitCondition`, and the related `WaitConditionNotRunning`, `WaitConditionNextExit`, and `WaitConditionRemoved` consts. [moby/moby#50378](https://github.com/moby/moby/pull/50378) +- Go SDK: deprecate `pkg/stdcopy`, which was moved to `api/pkg/stdcopy`. [moby/moby#50462](https://github.com/moby/moby/pull/50462) +- Go SDK: Deprecate field `NetworkSettingsBase.Bridge`, struct `NetworkSettingsBase`, all the fields of `DefaultNetworkSettings`, and struct `DefaultNetworkSettings`. [moby/moby#50848](https://github.com/moby/moby/pull/50848) +- Go SDK: deprecate pkg/stringid in favour of `github.com/moby/moby/client/pkg/stringid`. [moby/moby#50504](https://github.com/moby/moby/pull/50504) +- Go SDK: deprecate profiles package which got migrated to `github.com/moby/profiles`. [moby/moby#50481](https://github.com/moby/moby/pull/50481) +- Go SDK: oci: deprecate SetCapabilities, and some minor cleanups/fixes. [moby/moby#50461](https://github.com/moby/moby/pull/50461) +- Go SDK: opts: remove deprecated `ListOpts.GetAll`. It's no longer used and replaced by `ListOpts.GetSlice`. [docker/cli#6293](https://github.com/docker/cli/pull/6293) +- Go SDK: opts: remove deprecated `NewNamedListOptsRef`, `NewNamedMapOpts`, `NamedListOpts`, `NamedMapOpts`, and `NamedOption`. These types and functions are no longer used and will be removed in the next release. [docker/cli#6293](https://github.com/docker/cli/pull/6293) +- Go SDK: opts: remove deprecated `ParseEnvFile` in favour of `kvfile.Parse`. [docker/cli#6382](https://github.com/docker/cli/pull/6382) +- Go SDK: opts: remove deprecated `QuotedString`. [docker/cli#6293](https://github.com/docker/cli/pull/6293) +- Go SDK: opts: remove deprecated `ValidateHost`. [docker/cli#6293](https://github.com/docker/cli/pull/6293) +- Go SDK: pkg/system was removed, and is now an internal package. [moby/moby#50559](https://github.com/moby/moby/pull/50559) +- Go SDK: pkg/system: deprecate `EscapeArgs()` and `IsAbs`. These functions were only used internally and will be removed in the next release. [moby/moby#50399](https://github.com/moby/moby/pull/50399) +- Go SDK: registry: remove deprecated `APIEndpoint.TrimHostName`, `APIEndpoint.Official`, and `APIEndpoint.AllowNondistributableArtifacts` fields. [moby/moby#50376](https://github.com/moby/moby/pull/50376) +- Go SDK: registry: remove deprecated `HostCertsDir()` and `SetCertsDir()` functions. [moby/moby#50373](https://github.com/moby/moby/pull/50373) +- Go SDK: registry: remove deprecated `RepositoryInfo.Official` and `RepositoryInfo.Class` field. 
[moby/moby#50498](https://github.com/moby/moby/pull/50498) +- Go SDK: registry: remove deprecated `Service.ResolveRepository()`. [moby/moby#50374](https://github.com/moby/moby/pull/50374) +- Go SDK: Remove `buildkit.ClientOpts`. [moby/moby#50318](https://github.com/moby/moby/pull/50318) +- Go SDK: remove `pkg/fileutils`. [moby/moby#50558](https://github.com/moby/moby/pull/50558) +- Go SDK: Remove deprecated `IsNotFound`, `CommandAnnotationPlugin`, `CommandAnnotationPluginVendor`, `CommandAnnotationPluginVersion`, `CommandAnnotationPluginInvalid`, `CommandAnnotationPluginCommandPath`, `NamePrefix`, `MetadataSubcommandName`, `HookSubcommandName`, `Metadata`, and `ReexecEnvvar` from `cli-plugins/manager` in favor of their `cli-plugins/manager/metadata` equivalents. [docker/cli#6414](https://github.com/docker/cli/pull/6414) +- Go SDK: remove deprecated `types/plugins/logdriver` and `types/swarm/runtime` packages; plugin-runtime spec is now exposed as `types/swarm.RuntimeSpec` and `types/swarm.RuntimePrivilege`. [moby/moby#50554](https://github.com/moby/moby/pull/50554) +- Go SDK: remove deprecated `cli/command/formatter` package. [docker/cli#6406](https://github.com/docker/cli/pull/6406) +- Go SDK: remove deprecated `cli/registry/client` package. [docker/cli#6462](https://github.com/docker/cli/pull/6462) +- Go SDK: remove deprecated `pkg/idtools` package. [moby/moby#50456](https://github.com/moby/moby/pull/50456) +- Go SDK: templates: remove deprecated `NewParse` function. [docker/cli#6453](https://github.com/docker/cli/pull/6453) +- Hide the `--kernel-memory` option on `docker run` and `docker create`, and produce a warning when used as it's no longer supported by the daemon and kernel. [docker/cli#6455](https://github.com/docker/cli/pull/6455) +- Remove `VirtualSize` field from `docker image ls` output when using JSON format. [docker/cli#6524](https://github.com/docker/cli/pull/6524) +- Remove `VirtualSize` formatting options and output. [docker/cli#6524](https://github.com/docker/cli/pull/6524) +- Remove API version compatibility for API version < v1.44 (Docker v24.0 and older). [docker/cli#6551](https://github.com/docker/cli/pull/6551) +- Remove deprecated `bind-nonrecursive` option for `--mount`. [docker/cli#6241](https://github.com/docker/cli/pull/6241) +- Remove deprecated packages (`pkg/archive`, `pkg/chrootarchive`, `pkg/atomicwriter`, `pkg/reexec`, `pkg/platform`, `pkg/parsers`), `pkg/system.MkdirAll`. For replacements, see `github.com/moby/go-archive`, `github.com/moby/sys` and the standard library. [moby/moby#50208](https://github.com/moby/moby/pull/50208) +- Remove special handling for quoted values for the `--tlscacert`, `--tlscert`, and `--tlskey` command-line flags. [docker/cli#6306](https://github.com/docker/cli/pull/6306) +- Remove support for AutoRemove (`--rm`) on API < 1.30. [docker/cli#6525](https://github.com/docker/cli/pull/6525) +- Remove support for loading legacy (pre-Docker 1.10) images. 
[moby/moby#50324](https://github.com/moby/moby/pull/50324) diff --git a/content/manuals/engine/release-notes/prior-releases.md b/content/manuals/engine/release-notes/prior-releases.md index ffe5dac7966..68a53b1ec04 100644 --- a/content/manuals/engine/release-notes/prior-releases.md +++ b/content/manuals/engine/release-notes/prior-releases.md @@ -328,7 +328,7 @@ If you are currently using the `--ipv6` option _without_ specifying the `--fixed-cidr-v6` option, the Docker daemon will refuse to start with the following message: -```none +```text Error starting daemon: Error initializing network controller: Error creating default "bridge" network: failed to parse pool request for address space "LocalDefault" pool " subpool ": @@ -344,7 +344,7 @@ In a similar way, if you specify the `--ipv6` flag when creating a network with the default IPAM driver, without providing an IPv6 `--subnet`, network creation will fail with the following message: -```none +```text Error response from daemon: failed to parse pool request for address space "LocalDefault" pool "" subpool "": could not find an available, non-overlapping IPv6 address pool among @@ -397,7 +397,7 @@ If you are currently using the `--ipv6` option _without_ specifying the `--fixed-cidr-v6` option, the Docker daemon will refuse to start with the following message: -```none +```text Error starting daemon: Error initializing network controller: Error creating default "bridge" network: failed to parse pool request for address space "LocalDefault" pool " subpool ": @@ -413,7 +413,7 @@ In a similar way, if you specify the `--ipv6` flag when creating a network with the default IPAM driver, without providing an IPv6 `--subnet`, network creation will fail with the following message: -```none +```text Error response from daemon: failed to parse pool request for address space "LocalDefault" pool "" subpool "": could not find an available, non-overlapping IPv6 address pool among @@ -1041,7 +1041,7 @@ additional binaries; `dockerd`, and `docker-proxy`. If you have scripts for inst * Support for AAAA Records (aka IPv6 Service Discovery) in embedded DNS Server ([#21396](https://github.com/docker/docker/pull/21396)) - Fix to not forward docker domain IPv6 queries to external servers ([#21396](https://github.com/docker/docker/pull/21396)) * Multiple A/AAAA records from embedded DNS Server for DNS Round robin ([#21019](https://github.com/docker/docker/pull/21019)) -- Fix endpoint count inconsistency after an ungraceful dameon restart ([#21261](https://github.com/docker/docker/pull/21261)) +- Fix endpoint count inconsistency after an ungraceful daemon restart ([#21261](https://github.com/docker/docker/pull/21261)) - Move the ownership of exposed ports and port-mapping options from Endpoint to Sandbox ([#21019](https://github.com/docker/docker/pull/21019)) - Fixed a bug which prevents docker reload when host is configured with ipv6.disable=1 ([#21019](https://github.com/docker/docker/pull/21019)) - Added inbuilt nil IPAM driver ([#21019](https://github.com/docker/docker/pull/21019)) diff --git a/content/manuals/engine/security/_index.md b/content/manuals/engine/security/_index.md index 0929142d817..2c949d2ffd9 100644 --- a/content/manuals/engine/security/_index.md +++ b/content/manuals/engine/security/_index.md @@ -194,7 +194,7 @@ to the host. This doesn't affect regular web apps, but reduces the vectors of attack by malicious users considerably. 
By default Docker drops all capabilities except [those -needed](https://github.com/moby/moby/blob/master/oci/caps/defaults.go#L6-L19), +needed](https://github.com/moby/moby/blob/master/daemon/pkg/oci/caps/defaults.go#L6-L19), an allowlist instead of a denylist approach. You can see a full list of available capabilities in [Linux manpages](https://man7.org/linux/man-pages/man7/capabilities.7.html). diff --git a/content/manuals/engine/security/apparmor.md b/content/manuals/engine/security/apparmor.md index f56bf9abad2..8b6b1e0433d 100644 --- a/content/manuals/engine/security/apparmor.md +++ b/content/manuals/engine/security/apparmor.md @@ -28,7 +28,7 @@ in the Docker Engine source repository. The `docker-default` profile is the default for running containers. It is moderately protective while providing wide application compatibility. The profile is generated from the following -[template](https://github.com/moby/moby/blob/master/profiles/apparmor/template.go). +[template](https://github.com/moby/profiles/blob/main/apparmor/template.go). When you run a container, it uses the `docker-default` policy unless you override it with the `security-opt` option. For example, the following @@ -280,4 +280,4 @@ Advanced users and package managers can find a profile for `/usr/bin/docker` in the Docker Engine source repository. The `docker-default` profile for containers lives in -[profiles/apparmor](https://github.com/moby/moby/tree/master/profiles/apparmor). +[profiles/apparmor](https://github.com/moby/profiles/blob/main/apparmor). diff --git a/content/manuals/engine/security/https/README.md b/content/manuals/engine/security/https/README.md index 3ba70493879..ea427151c22 100644 --- a/content/manuals/engine/security/https/README.md +++ b/content/manuals/engine/security/https/README.md @@ -1,5 +1,5 @@ --- -_build: +build: list: never publishResources: false render: never diff --git a/content/manuals/engine/security/rootless.md b/content/manuals/engine/security/rootless.md deleted file mode 100644 index f267d59f4c5..00000000000 --- a/content/manuals/engine/security/rootless.md +++ /dev/null @@ -1,709 +0,0 @@ ---- -description: Run the Docker daemon as a non-root user (Rootless mode) -keywords: security, namespaces, rootless -title: Rootless mode -weight: 10 ---- - -Rootless mode allows running the Docker daemon and containers as a non-root -user to mitigate potential vulnerabilities in the daemon and -the container runtime. - -Rootless mode does not require root privileges even during the installation of -the Docker daemon, as long as the [prerequisites](#prerequisites) are met. - -## How it works - -Rootless mode executes the Docker daemon and containers inside a user namespace. -This is very similar to [`userns-remap` mode](userns-remap.md), except that -with `userns-remap` mode, the daemon itself is running with root privileges, -whereas in rootless mode, both the daemon and the container are running without -root privileges. - -Rootless mode does not use binaries with `SETUID` bits or file capabilities, -except `newuidmap` and `newgidmap`, which are needed to allow multiple -UIDs/GIDs to be used in the user namespace. - -## Prerequisites - -- You must install `newuidmap` and `newgidmap` on the host. These commands - are provided by the `uidmap` package on most distributions. - -- `/etc/subuid` and `/etc/subgid` should contain at least 65,536 subordinate - UIDs/GIDs for the user. In the following example, the user `testuser` has - 65,536 subordinate UIDs/GIDs (231072-296607). 
- -```console -$ id -u -1001 -$ whoami -testuser -$ grep ^$(whoami): /etc/subuid -testuser:231072:65536 -$ grep ^$(whoami): /etc/subgid -testuser:231072:65536 -``` - -### Distribution-specific hint - -> [!TIP] -> -> We recommend that you use the Ubuntu kernel. - -{{< tabs >}} -{{< tab name="Ubuntu" >}} -- Install `dbus-user-session` package if not installed. Run `sudo apt-get install -y dbus-user-session` and relogin. -- Install `uidmap` package if not installed. Run `sudo apt-get install -y uidmap`. -- If running in a terminal where the user was not directly logged into, you will need to install `systemd-container` with `sudo apt-get install -y systemd-container`, then switch to TheUser with the command `sudo machinectl shell TheUser@`. - -- `overlay2` storage driver is enabled by default - ([Ubuntu-specific kernel patch](https://kernel.ubuntu.com/git/ubuntu/ubuntu-bionic.git/commit/fs/overlayfs?id=3b7da90f28fe1ed4b79ef2d994c81efbc58f1144)). - -- Ubuntu 24.04 and later enables restricted unprivileged user namespaces by - default, which prevents unprivileged processes in creating user namespaces - unless an AppArmor profile is configured to allow programs to use - unprivileged user namespaces. - - If you install `docker-ce-rootless-extras` using the deb package (`apt-get - install docker-ce-rootless-extras`), then the AppArmor profile for - `rootlesskit` is already bundled with the `apparmor` deb package. With this - installation method, you don't need to add any manual the AppArmor - configuration. If you install the rootless extras using the [installation - script](https://get.docker.com/rootless), however, you must add an AppArmor - profile for `rootlesskit` manually: - - 1. Create and install the currently logged-in user's AppArmor profile: - - ```console - $ filename=$(echo $HOME/bin/rootlesskit | sed -e s@^/@@ -e s@/@.@g) - $ cat < ~/${filename} - abi , - include - - "$HOME/bin/rootlesskit" flags=(unconfined) { - userns, - - include if exists - } - EOF - $ sudo mv ~/${filename} /etc/apparmor.d/${filename} - ``` - 2. Restart AppArmor. - - ```console - $ systemctl restart apparmor.service - ``` - -{{< /tab >}} -{{< tab name="Debian GNU/Linux" >}} -- Install `dbus-user-session` package if not installed. Run `sudo apt-get install -y dbus-user-session` and relogin. - -- For Debian 11, installing `fuse-overlayfs` is recommended. Run `sudo apt-get install -y fuse-overlayfs`. - This step is not required on Debian 12. - -- Rootless docker requires version of `slirp4netns` greater than `v0.4.0` (when `vpnkit` is not installed). - Check you have this with - - ```console - $ slirp4netns --version - ``` - If you do not have this download and install with `sudo apt-get install -y slirp4netns` or download the latest [release](https://github.com/rootless-containers/slirp4netns/releases). -{{< /tab >}} -{{< tab name="Arch Linux" >}} -- Installing `fuse-overlayfs` is recommended. Run `sudo pacman -S fuse-overlayfs`. - -- Add `kernel.unprivileged_userns_clone=1` to `/etc/sysctl.conf` (or - `/etc/sysctl.d`) and run `sudo sysctl --system` -{{< /tab >}} -{{< tab name="openSUSE and SLES" >}} -- For openSUSE 15 and SLES 15, Installing `fuse-overlayfs` is recommended. Run `sudo zypper install -y fuse-overlayfs`. - This step is not required on openSUSE Tumbleweed. - -- `sudo modprobe ip_tables iptable_mangle iptable_nat iptable_filter` is required. - This might be required on other distributions as well depending on the configuration. - -- Known to work on openSUSE 15 and SLES 15. 
-{{< /tab >}} -{{< tab name="CentOS, RHEL, and Fedora" >}} -- For RHEL 8 and similar distributions, installing `fuse-overlayfs` is recommended. Run `sudo dnf install -y fuse-overlayfs`. - This step is not required on RHEL 9 and similar distributions. - -- You might need `sudo dnf install -y iptables`. -{{< /tab >}} -{{< /tabs >}} - -## Known limitations - -- Only the following storage drivers are supported: - - `overlay2` (only if running with kernel 5.11 or later, or Ubuntu-flavored kernel) - - `fuse-overlayfs` (only if running with kernel 4.18 or later, and `fuse-overlayfs` is installed) - - `btrfs` (only if running with kernel 4.18 or later, or `~/.local/share/docker` is mounted with `user_subvol_rm_allowed` mount option) - - `vfs` -- Cgroup is supported only when running with cgroup v2 and systemd. See [Limiting resources](#limiting-resources). -- Following features are not supported: - - AppArmor - - Checkpoint - - Overlay network - - Exposing SCTP ports -- To use the `ping` command, see [Routing ping packets](#routing-ping-packets). -- To expose privileged TCP/UDP ports (< 1024), see [Exposing privileged ports](#exposing-privileged-ports). -- `IPAddress` shown in `docker inspect` is namespaced inside RootlessKit's network namespace. - This means the IP address is not reachable from the host without `nsenter`-ing into the network namespace. -- Host network (`docker run --net=host`) is also namespaced inside RootlessKit. -- NFS mounts as the docker "data-root" is not supported. This limitation is not specific to rootless mode. - -## Install - -> [!NOTE] -> -> If the system-wide Docker daemon is already running, consider disabling it: ->```console ->$ sudo systemctl disable --now docker.service docker.socket ->$ sudo rm /var/run/docker.sock ->``` -> Should you choose not to shut down the `docker` service and socket, you will need to use the `--force` -> parameter in the next section. There are no known issues, but until you shutdown and disable you're -> still running rootful Docker. - -{{< tabs >}} -{{< tab name="With packages (RPM/DEB)" >}} - -If you installed Docker 20.10 or later with [RPM/DEB packages](/engine/install), you should have `dockerd-rootless-setuptool.sh` in `/usr/bin`. - -Run `dockerd-rootless-setuptool.sh install` as a non-root user to set up the daemon: - -```console -$ dockerd-rootless-setuptool.sh install -[INFO] Creating /home/testuser/.config/systemd/user/docker.service -... -[INFO] Installed docker.service successfully. -[INFO] To control docker.service, run: `systemctl --user (start|stop|restart) docker.service` -[INFO] To run docker.service on system startup, run: `sudo loginctl enable-linger testuser` - -[INFO] Make sure the following environment variables are set (or add them to ~/.bashrc): - -export PATH=/usr/bin:$PATH -export DOCKER_HOST=unix:///run/user/1000/docker.sock -``` - -If `dockerd-rootless-setuptool.sh` is not present, you may need to install the `docker-ce-rootless-extras` package manually, e.g., - -```console -$ sudo apt-get install -y docker-ce-rootless-extras -``` - -{{< /tab >}} -{{< tab name="Without packages" >}} - -If you do not have permission to run package managers like `apt-get` and `dnf`, -consider using the installation script available at [https://get.docker.com/rootless](https://get.docker.com/rootless). -Since static packages are not available for `s390x`, hence it is not supported for `s390x`. - -```console -$ curl -fsSL https://get.docker.com/rootless | sh -... 
-[INFO] Creating /home/testuser/.config/systemd/user/docker.service -... -[INFO] Installed docker.service successfully. -[INFO] To control docker.service, run: `systemctl --user (start|stop|restart) docker.service` -[INFO] To run docker.service on system startup, run: `sudo loginctl enable-linger testuser` - -[INFO] Make sure the following environment variables are set (or add them to ~/.bashrc): - -export PATH=/home/testuser/bin:$PATH -export DOCKER_HOST=unix:///run/user/1000/docker.sock -``` - -The binaries will be installed at `~/bin`. - -{{< /tab >}} -{{< /tabs >}} - -See [Troubleshooting](#troubleshooting) if you faced an error. - -## Uninstall - -To remove the systemd service of the Docker daemon, run `dockerd-rootless-setuptool.sh uninstall`: - -```console -$ dockerd-rootless-setuptool.sh uninstall -+ systemctl --user stop docker.service -+ systemctl --user disable docker.service -Removed /home/testuser/.config/systemd/user/default.target.wants/docker.service. -[INFO] Uninstalled docker.service -[INFO] This uninstallation tool does NOT remove Docker binaries and data. -[INFO] To remove data, run: `/usr/bin/rootlesskit rm -rf /home/testuser/.local/share/docker` -``` - -Unset environment variables PATH and DOCKER_HOST if you have added them to `~/.bashrc`. - -To remove the data directory, run `rootlesskit rm -rf ~/.local/share/docker`. - -To remove the binaries, remove `docker-ce-rootless-extras` package if you installed Docker with package managers. -If you installed Docker with https://get.docker.com/rootless ([Install without packages](#install)), -remove the binary files under `~/bin`: -```console -$ cd ~/bin -$ rm -f containerd containerd-shim containerd-shim-runc-v2 ctr docker docker-init docker-proxy dockerd dockerd-rootless-setuptool.sh dockerd-rootless.sh rootlesskit rootlesskit-docker-proxy runc vpnkit -``` - -## Usage - -### Daemon - -{{< tabs >}} -{{< tab name="With systemd (Highly recommended)" >}} - -The systemd unit file is installed as `~/.config/systemd/user/docker.service`. - -Use `systemctl --user` to manage the lifecycle of the daemon: - -```console -$ systemctl --user start docker -``` - -To launch the daemon on system startup, enable the systemd service and lingering: - -```console -$ systemctl --user enable docker -$ sudo loginctl enable-linger $(whoami) -``` - -Starting Rootless Docker as a systemd-wide service (`/etc/systemd/system/docker.service`) -is not supported, even with the `User=` directive. - -{{< /tab >}} -{{< tab name="Without systemd" >}} - -To run the daemon directly without systemd, you need to run `dockerd-rootless.sh` instead of `dockerd`. - -The following environment variables must be set: -- `$HOME`: the home directory -- `$XDG_RUNTIME_DIR`: an ephemeral directory that is only accessible by the expected user, e,g, `~/.docker/run`. - The directory should be removed on every host shutdown. - The directory can be on tmpfs, however, should not be under `/tmp`. - Locating this directory under `/tmp` might be vulnerable to TOCTOU attack. - -{{< /tab >}} -{{< /tabs >}} - -Remarks about directory paths: - -- The socket path is set to `$XDG_RUNTIME_DIR/docker.sock` by default. - `$XDG_RUNTIME_DIR` is typically set to `/run/user/$UID`. -- The data dir is set to `~/.local/share/docker` by default. - The data dir should not be on NFS. -- The daemon config dir is set to `~/.config/docker` by default. - This directory is different from `~/.docker` that is used by the client. 
- -### Client - -You need to specify either the socket path or the CLI context explicitly. - -To specify the socket path using `$DOCKER_HOST`: - -```console -$ export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/docker.sock -$ docker run -d -p 8080:80 nginx -``` - -To specify the CLI context using `docker context`: - -```console -$ docker context use rootless -rootless -Current context is now "rootless" -$ docker run -d -p 8080:80 nginx -``` - -## Best practices - -### Rootless Docker in Docker - -To run Rootless Docker inside "rootful" Docker, use the `docker:-dind-rootless` -image instead of `docker:-dind`. - -```console -$ docker run -d --name dind-rootless --privileged docker:25.0-dind-rootless -``` - -The `docker:-dind-rootless` image runs as a non-root user (UID 1000). -However, `--privileged` is required for disabling seccomp, AppArmor, and mount -masks. - -### Expose Docker API socket through TCP - -To expose the Docker API socket through TCP, you need to launch `dockerd-rootless.sh` -with `DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="-p 0.0.0.0:2376:2376/tcp"`. - -```console -$ DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="-p 0.0.0.0:2376:2376/tcp" \ - dockerd-rootless.sh \ - -H tcp://0.0.0.0:2376 \ - --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -``` - -### Expose Docker API socket through SSH - -To expose the Docker API socket through SSH, you need to make sure `$DOCKER_HOST` -is set on the remote host. - -```console -$ ssh -l 'echo $DOCKER_HOST' -unix:///run/user/1001/docker.sock -$ docker -H ssh://@ run ... -``` - -### Routing ping packets - -On some distributions, `ping` does not work by default. - -Add `net.ipv4.ping_group_range = 0 2147483647` to `/etc/sysctl.conf` (or -`/etc/sysctl.d`) and run `sudo sysctl --system` to allow using `ping`. - -### Exposing privileged ports - -To expose privileged ports (< 1024), set `CAP_NET_BIND_SERVICE` on `rootlesskit` binary and restart the daemon. - -```console -$ sudo setcap cap_net_bind_service=ep $(which rootlesskit) -$ systemctl --user restart docker -``` - -Or add `net.ipv4.ip_unprivileged_port_start=0` to `/etc/sysctl.conf` (or -`/etc/sysctl.d`) and run `sudo sysctl --system`. - -### Limiting resources - -Limiting resources with cgroup-related `docker run` flags such as `--cpus`, `--memory`, `--pids-limit` -is supported only when running with cgroup v2 and systemd. -See [Changing cgroup version](/manuals/engine/containers/runmetrics.md) to enable cgroup v2. - -If `docker info` shows `none` as `Cgroup Driver`, the conditions are not satisfied. -When these conditions are not satisfied, rootless mode ignores the cgroup-related `docker run` flags. -See [Limiting resources without cgroup](#limiting-resources-without-cgroup) for workarounds. - -If `docker info` shows `systemd` as `Cgroup Driver`, the conditions are satisfied. -However, typically, only `memory` and `pids` controllers are delegated to non-root users by default. - -```console -$ cat /sys/fs/cgroup/user.slice/user-$(id -u).slice/user@$(id -u).service/cgroup.controllers -memory pids -``` - -To allow delegation of all controllers, you need to change the systemd configuration as follows: - -```console -# mkdir -p /etc/systemd/system/user@.service.d -# cat > /etc/systemd/system/user@.service.d/delegate.conf << EOF -[Service] -Delegate=cpu cpuset io memory pids -EOF -# systemctl daemon-reload -``` - -> [!NOTE] -> -> Delegating `cpuset` requires systemd 244 or later. 
- -#### Limiting resources without cgroup - -Even when cgroup is not available, you can still use the traditional `ulimit` and [`cpulimit`](https://github.com/opsengine/cpulimit), -though they work in process-granularity rather than in container-granularity, -and can be arbitrarily disabled by the container process. - -For example: - -- To limit CPU usage to 0.5 cores (similar to `docker run --cpus 0.5`): - `docker run cpulimit --limit=50 --include-children ` -- To limit max VSZ to 64MiB (similar to `docker run --memory 64m`): - `docker run sh -c "ulimit -v 65536; "` - -- To limit max number of processes to 100 per namespaced UID 2000 - (similar to `docker run --pids-limit=100`): - `docker run --user 2000 --ulimit nproc=100 ` - -## Troubleshooting - -### Unable to install with systemd when systemd is present on the system - -``` console -$ dockerd-rootless-setuptool.sh install -[INFO] systemd not detected, dockerd-rootless.sh needs to be started manually: -... -``` -`rootlesskit` cannot detect systemd properly if you switch to your user via `sudo su`. For users which cannot be logged-in, you must use the `machinectl` command which is part of the `systemd-container` package. After installing `systemd-container` switch to `myuser` with the following command: -``` console -$ sudo machinectl shell myuser@ -``` -Where `myuser@` is your desired username and @ signifies this machine. - -### Errors when starting the Docker daemon - -**\[rootlesskit:parent\] error: failed to start the child: fork/exec /proc/self/exe: operation not permitted** - -This error occurs mostly when the value of `/proc/sys/kernel/unprivileged_userns_clone` is set to 0: - -```console -$ cat /proc/sys/kernel/unprivileged_userns_clone -0 -``` - -To fix this issue, add `kernel.unprivileged_userns_clone=1` to -`/etc/sysctl.conf` (or `/etc/sysctl.d`) and run `sudo sysctl --system`. - -**\[rootlesskit:parent\] error: failed to start the child: fork/exec /proc/self/exe: no space left on device** - -This error occurs mostly when the value of `/proc/sys/user/max_user_namespaces` is too small: - -```console -$ cat /proc/sys/user/max_user_namespaces -0 -``` - -To fix this issue, add `user.max_user_namespaces=28633` to -`/etc/sysctl.conf` (or `/etc/sysctl.d`) and run `sudo sysctl --system`. - -**\[rootlesskit:parent\] error: failed to setup UID/GID map: failed to compute uid/gid map: No subuid ranges found for user 1001 ("testuser")** - -This error occurs when `/etc/subuid` and `/etc/subgid` are not configured. See [Prerequisites](#prerequisites). - -**could not get XDG_RUNTIME_DIR** - -This error occurs when `$XDG_RUNTIME_DIR` is not set. - -On a non-systemd host, you need to create a directory and then set the path: - -```console -$ export XDG_RUNTIME_DIR=$HOME/.docker/xrd -$ rm -rf $XDG_RUNTIME_DIR -$ mkdir -p $XDG_RUNTIME_DIR -$ dockerd-rootless.sh -``` - -> [!NOTE] -> -> You must remove the directory every time you log out. - -On a systemd host, log into the host using `pam_systemd` (see below). -The value is automatically set to `/run/user/$UID` and cleaned up on every logout. - -**`systemctl --user` fails with "Failed to connect to bus: No such file or directory"** - -This error occurs mostly when you switch from the root user to a non-root user with `sudo`: - -```console -# sudo -iu testuser -$ systemctl --user start docker -Failed to connect to bus: No such file or directory -``` - -Instead of `sudo -iu `, you need to log in using `pam_systemd`. 
For example: - -- Log in through the graphic console -- `ssh @localhost` -- `machinectl shell @` - -**The daemon does not start up automatically** - -You need `sudo loginctl enable-linger $(whoami)` to enable the daemon to start -up automatically. See [Usage](#usage). - -**iptables failed: iptables -t nat -N DOCKER: Fatal: can't open lock file /run/xtables.lock: Permission denied** - -This error may happen with an older version of Docker when SELinux is enabled on the host. - -The issue has been fixed in Docker 20.10.8. -A known workaround for older version of Docker is to run the following commands to disable SELinux for `iptables`: -```console -$ sudo dnf install -y policycoreutils-python-utils && sudo semanage permissive -a iptables_t -``` - -### `docker pull` errors - -**docker: failed to register layer: Error processing tar file(exit status 1): lchown <FILE>: invalid argument** - -This error occurs when the number of available entries in `/etc/subuid` or -`/etc/subgid` is not sufficient. The number of entries required vary across -images. However, 65,536 entries are sufficient for most images. See -[Prerequisites](#prerequisites). - -**docker: failed to register layer: ApplyLayer exit status 1 stdout: stderr: lchown <FILE>: operation not permitted** - -This error occurs mostly when `~/.local/share/docker` is located on NFS. - -A workaround is to specify non-NFS `data-root` directory in `~/.config/docker/daemon.json` as follows: -```json -{"data-root":"/somewhere-out-of-nfs"} -``` - -### `docker run` errors - -**docker: Error response from daemon: OCI runtime create failed: ...: read unix @->/run/systemd/private: read: connection reset by peer: unknown.** - -This error occurs on cgroup v2 hosts mostly when the dbus daemon is not running for the user. - -```console -$ systemctl --user is-active dbus -inactive - -$ docker run hello-world -docker: Error response from daemon: OCI runtime create failed: container_linux.go:380: starting container process caused: process_linux.go:385: applying cgroup configuration for process caused: error while starting unit "docker --931c15729b5a968ce803784d04c7421f791d87e5ca1891f34387bb9f694c488e.scope" with properties [{Name:Description Value:"libcontainer container 931c15729b5a968ce803784d04c7421f791d87e5ca1891f34387bb9f694c488e"} {Name:Slice Value:"use -r.slice"} {Name:PIDs Value:@au [4529]} {Name:Delegate Value:true} {Name:MemoryAccounting Value:true} {Name:CPUAccounting Value:true} {Name:IOAccounting Value:true} {Name:TasksAccounting Value:true} {Name:DefaultDependencies Val -ue:false}]: read unix @->/run/systemd/private: read: connection reset by peer: unknown. -``` - -To fix the issue, run `sudo apt-get install -y dbus-user-session` or `sudo dnf install -y dbus-daemon`, and then relogin. - -If the error still occurs, try running `systemctl --user enable --now dbus` (without sudo). - -**`--cpus`, `--memory`, and `--pids-limit` are ignored** - -This is an expected behavior on cgroup v1 mode. -To use these flags, the host needs to be configured for enabling cgroup v2. -For more information, see [Limiting resources](#limiting-resources). - -### Networking errors - -This section provides troubleshooting tips for networking in rootless mode. - -Networking in rootless mode is supported via network and port drivers in -RootlessKit. Network performance and characteristics depend on the combination -of network and port driver you use. 
If you're experiencing unexpected behavior -or performance related to networking, review the following table which shows -the configurations supported by RootlessKit, and how they compare: - -| Network driver | Port driver | Net throughput | Port throughput | Source IP propagation | No SUID | Note | -| -------------- | -------------- | -------------- | --------------- | --------------------- | ------- | ---------------------------------------------------------------------------- | -| `slirp4netns` | `builtin` | Slow | Fast ✅ | ❌ | ✅ | Default in a typical setup | -| `vpnkit` | `builtin` | Slow | Fast ✅ | ❌ | ✅ | Default when `slirp4netns` isn't installed | -| `slirp4netns` | `slirp4netns` | Slow | Slow | ✅ | ✅ | | -| `pasta` | `implicit` | Slow | Fast ✅ | ✅ | ✅ | Experimental; Needs pasta version 2023_12_04 or later | -| `lxc-user-nic` | `builtin` | Fast ✅ | Fast ✅ | ❌ | ❌ | Experimental | -| `bypass4netns` | `bypass4netns` | Fast ✅ | Fast ✅ | ✅ | ✅ | **Note:** Not integrated to RootlessKit as it needs a custom seccomp profile | - -For information about troubleshooting specific networking issues, see: - -- [`docker run -p` fails with `cannot expose privileged port`](#docker-run--p-fails-with-cannot-expose-privileged-port) -- [Ping doesn't work](#ping-doesnt-work) -- [`IPAddress` shown in `docker inspect` is unreachable](#ipaddress-shown-in-docker-inspect-is-unreachable) -- [`--net=host` doesn't listen ports on the host network namespace](#--nethost-doesnt-listen-ports-on-the-host-network-namespace) -- [Network is slow](#network-is-slow) -- [`docker run -p` does not propagate source IP addresses](#docker-run--p-does-not-propagate-source-ip-addresses) - -#### `docker run -p` fails with `cannot expose privileged port` - -`docker run -p` fails with this error when a privileged port (< 1024) is specified as the host port. - -```console -$ docker run -p 80:80 nginx:alpine -docker: Error response from daemon: driver failed programming external connectivity on endpoint focused_swanson (9e2e139a9d8fc92b37c36edfa6214a6e986fa2028c0cc359812f685173fa6df7): Error starting userland proxy: error while calling PortManager.AddPort(): cannot expose privileged port 80, you might need to add "net.ipv4.ip_unprivileged_port_start=0" (currently 1024) to /etc/sysctl.conf, or set CAP_NET_BIND_SERVICE on rootlesskit binary, or choose a larger port number (>= 1024): listen tcp 0.0.0.0:80: bind: permission denied. -``` - -When you experience this error, consider using an unprivileged port instead. For example, 8080 instead of 80. - -```console -$ docker run -p 8080:80 nginx:alpine -``` - -To allow exposing privileged ports, see [Exposing privileged ports](#exposing-privileged-ports). - -#### Ping doesn't work - -Ping does not work when `/proc/sys/net/ipv4/ping_group_range` is set to `1 0`: - -```console -$ cat /proc/sys/net/ipv4/ping_group_range -1 0 -``` - -For details, see [Routing ping packets](#routing-ping-packets). - -#### `IPAddress` shown in `docker inspect` is unreachable - -This is an expected behavior, as the daemon is namespaced inside RootlessKit's -network namespace. Use `docker run -p` instead. - -#### `--net=host` doesn't listen ports on the host network namespace - -This is an expected behavior, as the daemon is namespaced inside RootlessKit's -network namespace. Use `docker run -p` instead. - -#### Network is slow - -Docker with rootless mode uses [slirp4netns](https://github.com/rootless-containers/slirp4netns) as the default network stack if slirp4netns v0.4.0 or later is installed. 
-If slirp4netns is not installed, Docker falls back to [VPNKit](https://github.com/moby/vpnkit). -Installing slirp4netns may improve the network throughput. - -For more information about network drivers for RootlessKit, see -[RootlessKit documentation](https://github.com/rootless-containers/rootlesskit/blob/v2.0.0/docs/network.md). - -Also, changing MTU value may improve the throughput. -The MTU value can be specified by creating `~/.config/systemd/user/docker.service.d/override.conf` with the following content: - -```systemd -[Service] -Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=" -``` - -And then restart the daemon: -```console -$ systemctl --user daemon-reload -$ systemctl --user restart docker -``` - -#### `docker run -p` does not propagate source IP addresses - -This is because Docker in rootless mode uses RootlessKit's `builtin` port -driver by default, which doesn't support source IP propagation. To enable -source IP propagation, you can: - -- Use the `slirp4netns` RootlessKit port driver -- Use the `pasta` RootlessKit network driver, with the `implicit` port driver - -The `pasta` network driver is experimental, but provides improved throughput -performance compared to the `slirp4netns` port driver. The `pasta` driver -requires Docker Engine version 25.0 or later. - -To change the RootlessKit networking configuration: - -1. Create a file at `~/.config/systemd/user/docker.service.d/override.conf`. -2. Add the following contents, depending on which configuration you would like to use: - - - `slirp4netns` - - ```systemd - [Service] - Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_NET=slirp4netns" - Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=slirp4netns" - ``` - - - `pasta` network driver with `implicit` port driver - - ```systemd - [Service] - Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_NET=pasta" - Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=implicit" - ``` - -3. Restart the daemon: - - ```console - $ systemctl --user daemon-reload - $ systemctl --user restart docker - ``` - -For more information about networking options for RootlessKit, see: - -- [Network drivers](https://github.com/rootless-containers/rootlesskit/blob/v2.0.0/docs/network.md) -- [Port drivers](https://github.com/rootless-containers/rootlesskit/blob/v2.0.0/docs/port.md) - -### Tips for debugging - -**Entering into `dockerd` namespaces** - -The `dockerd-rootless.sh` script executes `dockerd` in its own user, mount, and network namespaces. - -For debugging, you can enter the namespaces by running -`nsenter -U --preserve-credentials -n -m -t $(cat $XDG_RUNTIME_DIR/docker.pid)`. diff --git a/content/manuals/engine/security/rootless/_index.md b/content/manuals/engine/security/rootless/_index.md new file mode 100644 index 00000000000..45cc7875dd5 --- /dev/null +++ b/content/manuals/engine/security/rootless/_index.md @@ -0,0 +1,147 @@ +--- +description: Run the Docker daemon as a non-root user (Rootless mode) +keywords: security, namespaces, rootless +title: Rootless mode +weight: 10 +--- + +Rootless mode lets you run the Docker daemon and containers as a non-root +user to mitigate potential vulnerabilities in the daemon and +the container runtime. + +Rootless mode does not require root privileges even during the installation of +the Docker daemon, as long as the [prerequisites](#prerequisites) are met. + +## How it works + +Rootless mode executes the Docker daemon and containers inside a user namespace. 
+This is similar to [`userns-remap` mode](../userns-remap.md), except that
+with `userns-remap` mode, the daemon itself is running with root privileges,
+whereas in rootless mode, both the daemon and the container are running without
+root privileges.
+
+Rootless mode does not use binaries with `SETUID` bits or file capabilities,
+except `newuidmap` and `newgidmap`, which are needed to allow multiple
+UIDs/GIDs to be used in the user namespace.
+
+
+## Prerequisites
+
+- You must install `newuidmap` and `newgidmap` on the host. These commands
+  are provided by the `uidmap` package on most distributions.
+
+- `/etc/subuid` and `/etc/subgid` should contain at least 65,536 subordinate
+  UIDs/GIDs for the user. In the following example, the user `testuser` has
+  65,536 subordinate UIDs/GIDs (231072-296607).
+
+```console
+$ id -u
+1001
+$ whoami
+testuser
+$ grep ^$(whoami): /etc/subuid
+testuser:231072:65536
+$ grep ^$(whoami): /etc/subgid
+testuser:231072:65536
+```
+
+The `dockerd-rootless-setuptool.sh install` script (see the following section) automatically shows help
+when the prerequisites are not satisfied.
+
+## Install
+
+> [!NOTE]
+>
+> If the system-wide Docker daemon is already running, consider disabling it:
+>```console
+>$ sudo systemctl disable --now docker.service docker.socket
+>$ sudo rm /var/run/docker.sock
+>```
+> Should you choose not to shut down the `docker` service and socket, you will need to use the `--force`
+> parameter in the next section. There are no known issues, but until you shut down and disable them, you're
+> still running rootful Docker.
+
+{{< tabs >}}
+{{< tab name="With packages (RPM/DEB)" >}}
+
+If you installed Docker 20.10 or later with [RPM/DEB packages](/engine/install), you should have `dockerd-rootless-setuptool.sh` in `/usr/bin`.
+
+Run `dockerd-rootless-setuptool.sh install` as a non-root user to set up the daemon:
+
+```console
+$ dockerd-rootless-setuptool.sh install
+[INFO] Creating /home/testuser/.config/systemd/user/docker.service
+...
+[INFO] Installed docker.service successfully.
+[INFO] To control docker.service, run: `systemctl --user (start|stop|restart) docker.service`
+[INFO] To run docker.service on system startup, run: `sudo loginctl enable-linger testuser`
+
+[INFO] Creating CLI context "rootless"
+Successfully created context "rootless"
+[INFO] Using CLI context "rootless"
+Current context is now "rootless"
+
+[INFO] Make sure the following environment variable(s) are set (or add them to ~/.bashrc):
+export PATH=/usr/bin:$PATH
+
+[INFO] Some applications may require the following environment variable too:
+export DOCKER_HOST=unix:///run/user/1000/docker.sock
+```
+
+If `dockerd-rootless-setuptool.sh` is not present, you may need to install the `docker-ce-rootless-extras` package manually, for example:
+
+```console
+$ sudo apt-get install -y docker-ce-rootless-extras
+```
+
+{{< /tab >}}
+{{< tab name="Without packages" >}}
+
+If you do not have permission to run package managers like `apt-get` and `dnf`,
+consider using the installation script available at [https://get.docker.com/rootless](https://get.docker.com/rootless).
+Static packages are not available for `s390x`, so rootless mode is not supported on `s390x`.
+
+```console
+$ curl -fsSL https://get.docker.com/rootless | sh
+...
+[INFO] Creating /home/testuser/.config/systemd/user/docker.service
+...
+[INFO] Installed docker.service successfully.
+[INFO] To control docker.service, run: `systemctl --user (start|stop|restart) docker.service`
+[INFO] To run docker.service on system startup, run: `sudo loginctl enable-linger testuser`
+
+[INFO] Creating CLI context "rootless"
+Successfully created context "rootless"
+[INFO] Using CLI context "rootless"
+Current context is now "rootless"
+
+[INFO] Make sure the following environment variable(s) are set (or add them to ~/.bashrc):
+export PATH=/home/testuser/bin:$PATH
+
+[INFO] Some applications may require the following environment variable too:
+export DOCKER_HOST=unix:///run/user/1000/docker.sock
+```
+
+The binaries will be installed at `~/bin`.
+
+{{< /tab >}}
+{{< /tabs >}}
+
+Run `docker info` to confirm that the `docker` client is connecting to the rootless daemon:
+```console
+$ docker info
+Client: Docker Engine - Community
+ Version: 28.3.3
+ Context: rootless
+...
+Server:
+...
+ Security Options:
+ seccomp
+ Profile: builtin
+ rootless
+ cgroupns
+...
+```
+
+See [Troubleshooting](./troubleshoot.md) if you face an error.
\ No newline at end of file
diff --git a/content/manuals/engine/security/rootless/tips.md b/content/manuals/engine/security/rootless/tips.md
new file mode 100644
index 00000000000..d0f810e9d93
--- /dev/null
+++ b/content/manuals/engine/security/rootless/tips.md
@@ -0,0 +1,185 @@
+---
+description: Tips for the Rootless mode
+keywords: security, namespaces, rootless
+title: Tips
+weight: 20
+---
+
+## Advanced usage
+
+### Daemon
+
+{{< tabs >}}
+{{< tab name="With systemd (Highly recommended)" >}}
+
+The systemd unit file is installed as `~/.config/systemd/user/docker.service`.
+
+Use `systemctl --user` to manage the lifecycle of the daemon:
+
+```console
+$ systemctl --user start docker
+```
+
+To launch the daemon on system startup, enable the systemd service and lingering:
+
+```console
+$ systemctl --user enable docker
+$ sudo loginctl enable-linger $(whoami)
+```
+
+Starting Rootless Docker as a systemd-wide service (`/etc/systemd/system/docker.service`)
+is not supported, even with the `User=` directive.
+
+{{< /tab >}}
+{{< tab name="Without systemd" >}}
+
+To run the daemon directly without systemd, you need to run `dockerd-rootless.sh` instead of `dockerd`.
+
+The following environment variables must be set:
+- `$HOME`: the home directory
+- `$XDG_RUNTIME_DIR`: an ephemeral directory that is only accessible by the expected user, e.g., `~/.docker/run`.
+  The directory should be removed on every host shutdown.
+  The directory can be on tmpfs; however, it should not be under `/tmp`.
+  Locating this directory under `/tmp` might be vulnerable to a TOCTOU attack.
+
+{{< /tab >}}
+{{< /tabs >}}
+
+Note the following about directory paths:
+
+- The socket path is set to `$XDG_RUNTIME_DIR/docker.sock` by default.
+  `$XDG_RUNTIME_DIR` is typically set to `/run/user/$UID`.
+- The data dir is set to `~/.local/share/docker` by default.
+  The data dir should not be on NFS.
+- The daemon config dir is set to `~/.config/docker` by default.
+  This directory is different from `~/.docker`, which is used by the client.
+
+### Client
+
+Since Docker Engine v23.0, `dockerd-rootless-setuptool.sh install` automatically configures
+the `docker` CLI to use the `rootless` context.
+
+Prior to Docker Engine v23.0, a user had to specify either the socket path or the CLI context explicitly.
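If you're unsure which endpoint your CLI currently uses, a quick check before overriding anything is the `docker context` command (a minimal sketch; the exact context names and endpoints shown depend on your setup):

```console
$ docker context show
rootless
$ docker context ls
NAME         DESCRIPTION                               DOCKER ENDPOINT                     ERROR
default      Current DOCKER_HOST based configuration   unix:///var/run/docker.sock
rootless *   Rootless mode                             unix:///run/user/1000/docker.sock
```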
+
+To specify the socket path using `$DOCKER_HOST`:
+
+```console
+$ export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/docker.sock
+$ docker run -d -p 8080:80 nginx
+```
+
+To specify the CLI context using `docker context`:
+
+```console
+$ docker context use rootless
+rootless
+Current context is now "rootless"
+$ docker run -d -p 8080:80 nginx
+```
+
+## Best practices
+
+### Rootless Docker in Docker
+
+To run Rootless Docker inside "rootful" Docker, use the `docker:<version>-dind-rootless`
+image instead of `docker:<version>-dind`.
+
+```console
+$ docker run -d --name dind-rootless --privileged docker:25.0-dind-rootless
+```
+
+The `docker:<version>-dind-rootless` image runs as a non-root user (UID 1000).
+However, `--privileged` is required for disabling seccomp, AppArmor, and mount
+masks.
+
+### Expose Docker API socket through TCP
+
+To expose the Docker API socket through TCP, you need to launch `dockerd-rootless.sh`
+with `DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="-p 0.0.0.0:2376:2376/tcp"`.
+
+```console
+$ DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="-p 0.0.0.0:2376:2376/tcp" \
+  dockerd-rootless.sh \
+  -H tcp://0.0.0.0:2376 \
+  --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem
+```
+
+### Expose Docker API socket through SSH
+
+To expose the Docker API socket through SSH, you need to make sure `$DOCKER_HOST`
+is set on the remote host.
+
+```console
+$ ssh -l <REMOTEUSER> <REMOTEHOST> 'echo $DOCKER_HOST'
+unix:///run/user/1001/docker.sock
+$ docker -H ssh://<REMOTEUSER>@<REMOTEHOST> run ...
+```
+
+### Routing ping packets
+
+On some distributions, `ping` does not work by default.
+
+Add `net.ipv4.ping_group_range = 0 2147483647` to `/etc/sysctl.conf` (or
+`/etc/sysctl.d`) and run `sudo sysctl --system` to allow using `ping`.
+
+### Exposing privileged ports
+
+To expose privileged ports (< 1024), set `CAP_NET_BIND_SERVICE` on the `rootlesskit` binary and restart the daemon.
+
+```console
+$ sudo setcap cap_net_bind_service=ep $(which rootlesskit)
+$ systemctl --user restart docker
+```
+
+Or add `net.ipv4.ip_unprivileged_port_start=0` to `/etc/sysctl.conf` (or
+`/etc/sysctl.d`) and run `sudo sysctl --system`.
+
+### Limiting resources
+
+Limiting resources with cgroup-related `docker run` flags such as `--cpus`, `--memory`, `--pids-limit`
+is supported only when running with cgroup v2 and systemd.
+See [Changing cgroup version](/manuals/engine/containers/runmetrics.md) to enable cgroup v2.
+
+If `docker info` shows `none` as `Cgroup Driver`, the conditions are not satisfied.
+When these conditions are not satisfied, rootless mode ignores the cgroup-related `docker run` flags.
+See [Limiting resources without cgroup](#limiting-resources-without-cgroup) for workarounds.
+
+If `docker info` shows `systemd` as `Cgroup Driver`, the conditions are satisfied.
+However, typically, only `memory` and `pids` controllers are delegated to non-root users by default.
+
+```console
+$ cat /sys/fs/cgroup/user.slice/user-$(id -u).slice/user@$(id -u).service/cgroup.controllers
+memory pids
+```
+
+To allow delegation of all controllers, you need to change the systemd configuration as follows:
+
+```console
+# mkdir -p /etc/systemd/system/user@.service.d
+# cat > /etc/systemd/system/user@.service.d/delegate.conf << EOF
+[Service]
+Delegate=cpu cpuset io memory pids
+EOF
+# systemctl daemon-reload
+```
+
+> [!NOTE]
+>
+> Delegating `cpuset` requires systemd 244 or later.
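After changing the delegation configuration and logging in again, one way to confirm that the controllers are delegated and that the cgroup-related flags take effect (a sketch; the controller list shown and the `alpine` image are assumptions about your environment):

```console
$ systemctl --user restart docker
$ cat /sys/fs/cgroup/user.slice/user-$(id -u).slice/user@$(id -u).service/cgroup.controllers
cpuset cpu io memory pids
$ docker run --rm --cpus 0.5 --memory 64m --pids-limit 100 alpine true
```

If all five controllers are listed, rootless mode should no longer ignore these `docker run` flags.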
+
+#### Limiting resources without cgroup
+
+Even when cgroup is not available, you can still use the traditional `ulimit` and [`cpulimit`](https://github.com/opsengine/cpulimit),
+though they work in process-granularity rather than in container-granularity,
+and can be arbitrarily disabled by the container process.
+
+For example:
+
+- To limit CPU usage to 0.5 cores (similar to `docker run --cpus 0.5`):
+  `docker run <IMAGE> cpulimit --limit=50 --include-children <COMMAND>`
+- To limit max VSZ to 64MiB (similar to `docker run --memory 64m`):
+  `docker run <IMAGE> sh -c "ulimit -v 65536; <COMMAND>"`
+
+- To limit max number of processes to 100 per namespaced UID 2000
+  (similar to `docker run --pids-limit=100`):
+  `docker run --user 2000 --ulimit nproc=100 <IMAGE> <COMMAND>`
diff --git a/content/manuals/engine/security/rootless/troubleshoot.md b/content/manuals/engine/security/rootless/troubleshoot.md
new file mode 100644
index 00000000000..2a298a94eb5
--- /dev/null
+++ b/content/manuals/engine/security/rootless/troubleshoot.md
@@ -0,0 +1,384 @@
+---
+description: Troubleshooting the Rootless mode
+keywords: security, namespaces, rootless, troubleshooting
+title: Troubleshooting
+weight: 30
+---
+
+### Distribution-specific hints
+
+{{< tabs >}}
+{{< tab name="Ubuntu" >}}
+- Ubuntu 24.04 and later enables restricted unprivileged user namespaces by
+  default, which prevents unprivileged processes from creating user namespaces
+  unless an AppArmor profile is configured to allow programs to use
+  unprivileged user namespaces.
+
+  If you install `docker-ce-rootless-extras` using the deb package (`apt-get
+  install docker-ce-rootless-extras`), then the AppArmor profile for
+  `rootlesskit` is already bundled with the `apparmor` deb package. With this
+  installation method, you don't need to add any manual AppArmor
+  configuration. If you install the rootless extras using the [installation
+  script](https://get.docker.com/rootless), however, you must add an AppArmor
+  profile for `rootlesskit` manually:
+
+  1. Create and install the currently logged-in user's AppArmor profile:
+
+     ```console
+     $ filename=$(echo $HOME/bin/rootlesskit | sed -e 's@^/@@' -e 's@/@.@g')
+     $ [ ! -z "${filename}" ] && sudo tee "/etc/apparmor.d/${filename}" > /dev/null << EOF
+     abi <abi/4.0>,
+     include <tunables/global>
+
+     "$HOME/bin/rootlesskit" flags=(unconfined) {
+       userns,
+
+       include if exists <local/${filename}>
+     }
+     EOF
+     ```
+  2. Restart AppArmor.
+
+     ```console
+     $ sudo systemctl restart apparmor.service
+     ```
+
+{{< /tab >}}
+{{< tab name="Arch Linux" >}}
+- Add `kernel.unprivileged_userns_clone=1` to `/etc/sysctl.conf` (or
+  `/etc/sysctl.d`) and run `sudo sysctl --system`.
+{{< /tab >}}
+{{< tab name="openSUSE and SLES" >}}
+- `sudo modprobe ip_tables iptable_mangle iptable_nat iptable_filter` is required.
+  This might be required on other distributions as well depending on the configuration.
+
+- Known to work on openSUSE 15 and SLES 15.
+{{< /tab >}}
+{{< tab name="CentOS, RHEL, and Fedora" >}}
+- For RHEL 8 and similar distributions, installing `fuse-overlayfs` is recommended. Run `sudo dnf install -y fuse-overlayfs`.
+  This step is not required on RHEL 9 and similar distributions.
+
+- You might need `sudo dnf install -y iptables`.
+{{< /tab >}}
+{{< /tabs >}}
+
+## Known limitations
+
+- Only the following storage drivers are supported:
+  - `overlay2` (only if running with kernel 5.11 or later)
+  - `fuse-overlayfs` (only if running with kernel 4.18 or later, and `fuse-overlayfs` is installed)
+  - `btrfs` (only if running with kernel 4.18 or later, or `~/.local/share/docker` is mounted with the `user_subvol_rm_allowed` mount option)
+  - `vfs`
+- cgroup is supported only when running with cgroup v2 and systemd. See [Limiting resources](./tips.md#limiting-resources).
+- The following features are not supported:
+  - AppArmor
+  - Checkpoint
+  - Overlay network
+  - Exposing SCTP ports
+- To use the `ping` command, see [Routing ping packets](./tips.md#routing-ping-packets).
+- To expose privileged TCP/UDP ports (< 1024), see [Exposing privileged ports](./tips.md#exposing-privileged-ports).
+- `IPAddress` shown in `docker inspect` is namespaced inside RootlessKit's network namespace.
+  This means the IP address is not reachable from the host without `nsenter`-ing into the network namespace.
+- Host network (`docker run --net=host`) is also namespaced inside RootlessKit.
+- Using an NFS mount as the docker "data-root" is not supported. This limitation is not specific to rootless mode.
+
+## Troubleshooting
+
+### Unable to install with systemd when systemd is present on the system
+
+```console
+$ dockerd-rootless-setuptool.sh install
+[INFO] systemd not detected, dockerd-rootless.sh needs to be started manually:
+...
+```
+`rootlesskit` cannot detect systemd properly if you switch to your user via `sudo su`. For users who cannot be logged in directly, use the `machinectl` command, which is part of the `systemd-container` package. After installing `systemd-container`, switch to `myuser` with the following command:
+```console
+$ sudo machinectl shell myuser@
+```
+Here, `myuser` is your desired username, and the trailing `@` signifies the local machine.
+
+### Errors when starting the Docker daemon
+
+**\[rootlesskit:parent\] error: failed to start the child: fork/exec /proc/self/exe: operation not permitted**
+
+This error occurs mostly when the value of `/proc/sys/kernel/unprivileged_userns_clone` is set to 0:
+
+```console
+$ cat /proc/sys/kernel/unprivileged_userns_clone
+0
+```
+
+To fix this issue, add `kernel.unprivileged_userns_clone=1` to
+`/etc/sysctl.conf` (or `/etc/sysctl.d`) and run `sudo sysctl --system`.
+
+**\[rootlesskit:parent\] error: failed to start the child: fork/exec /proc/self/exe: no space left on device**
+
+This error occurs mostly when the value of `/proc/sys/user/max_user_namespaces` is too small:
+
+```console
+$ cat /proc/sys/user/max_user_namespaces
+0
+```
+
+To fix this issue, add `user.max_user_namespaces=28633` to
+`/etc/sysctl.conf` (or `/etc/sysctl.d`) and run `sudo sysctl --system`.
+
+**\[rootlesskit:parent\] error: failed to setup UID/GID map: failed to compute uid/gid map: No subuid ranges found for user 1001 ("testuser")**
+
+This error occurs when `/etc/subuid` and `/etc/subgid` are not configured. See [Prerequisites](./_index.md#prerequisites).
+
+**could not get XDG_RUNTIME_DIR**
+
+This error occurs when `$XDG_RUNTIME_DIR` is not set.
+
+On a non-systemd host, you need to create a directory and then set the path:
+
+```console
+$ export XDG_RUNTIME_DIR=$HOME/.docker/xrd
+$ rm -rf $XDG_RUNTIME_DIR
+$ mkdir -p $XDG_RUNTIME_DIR
+$ dockerd-rootless.sh
+```
+
+> [!NOTE]
+>
+> You must remove the directory every time you log out.
+
+On a systemd host, log into the host using `pam_systemd` (see below).
+The value is automatically set to `/run/user/$UID` and cleaned up on every logout.
+
+**`systemctl --user` fails with "Failed to connect to bus: No such file or directory"**
+
+This error occurs mostly when you switch from the root user to a non-root user with `sudo`:
+
+```console
+# sudo -iu testuser
+$ systemctl --user start docker
+Failed to connect to bus: No such file or directory
+```
+
+Instead of `sudo -iu <USER>`, you need to log in using `pam_systemd`. For example:
+
+- Log in through the graphic console
+- `ssh <USER>@localhost`
+- `machinectl shell <USER>@`
+
+**The daemon does not start up automatically**
+
+You need `sudo loginctl enable-linger $(whoami)` to enable the daemon to start
+up automatically. See [Advanced usage](./tips.md#advanced-usage).
+
+### `docker pull` errors
+
+**docker: failed to register layer: Error processing tar file(exit status 1): lchown <FILE>: invalid argument**
+
+This error occurs when the number of available entries in `/etc/subuid` or
+`/etc/subgid` is not sufficient. The number of entries required varies across
+images. However, 65,536 entries are sufficient for most images. See
+[Prerequisites](./_index.md#prerequisites).
+
+**docker: failed to register layer: ApplyLayer exit status 1 stdout: stderr: lchown <FILE>: operation not permitted**
+
+This error occurs mostly when `~/.local/share/docker` is located on NFS.
+
+A workaround is to specify a non-NFS `data-root` directory in `~/.config/docker/daemon.json` as follows:
+```json
+{"data-root":"/somewhere-out-of-nfs"}
+```
+
+### `docker run` errors
+
+**docker: Error response from daemon: OCI runtime create failed: ...: read unix @->/run/systemd/private: read: connection reset by peer: unknown.**
+
+This error occurs on cgroup v2 hosts mostly when the dbus daemon is not running for the user.
+
+```console
+$ systemctl --user is-active dbus
+inactive
+
+$ docker run hello-world
+docker: Error response from daemon: OCI runtime create failed: container_linux.go:380: starting container process caused: process_linux.go:385: applying cgroup configuration for process caused: error while starting unit "docker
+-931c15729b5a968ce803784d04c7421f791d87e5ca1891f34387bb9f694c488e.scope" with properties [{Name:Description Value:"libcontainer container 931c15729b5a968ce803784d04c7421f791d87e5ca1891f34387bb9f694c488e"} {Name:Slice Value:"use
+r.slice"} {Name:PIDs Value:@au [4529]} {Name:Delegate Value:true} {Name:MemoryAccounting Value:true} {Name:CPUAccounting Value:true} {Name:IOAccounting Value:true} {Name:TasksAccounting Value:true} {Name:DefaultDependencies Val
+ue:false}]: read unix @->/run/systemd/private: read: connection reset by peer: unknown.
+```
+
+To fix the issue, run `sudo apt-get install -y dbus-user-session` or `sudo dnf install -y dbus-daemon`, and then log in again.
+
+If the error still occurs, try running `systemctl --user enable --now dbus` (without sudo).
+
+**`--cpus`, `--memory`, and `--pids-limit` are ignored**
+
+This is expected behavior in cgroup v1 mode.
+To use these flags, the host needs to be configured to enable cgroup v2.
+For more information, see [Limiting resources](./tips.md#limiting-resources).
+
+### Networking errors
+
+This section provides troubleshooting tips for networking in rootless mode.
+
+Networking in rootless mode is supported via network and port drivers in
+RootlessKit. Network performance and characteristics depend on the combination
+of network and port driver you use.
If you're experiencing unexpected behavior +or performance related to networking, review the following table which shows +the configurations supported by RootlessKit, and how they compare: + +| Network driver | Port driver | Net throughput | Port throughput | Source IP propagation | No SUID | Note | +| -------------- | -------------- | -------------- | --------------- | --------------------- | ------- | ---------------------------------------------------------------------------- | +| `slirp4netns` | `builtin` | Slow | Fast ✅ | ❌ | ✅ | Default in a typical setup | +| `vpnkit` | `builtin` | Slow | Fast ✅ | ❌ | ✅ | Default when `slirp4netns` isn't installed | +| `slirp4netns` | `slirp4netns` | Slow | Slow | ✅ | ✅ | | +| `pasta` | `implicit` | Slow | Fast ✅ | ✅ | ✅ | Experimental; Needs pasta version 2023_12_04 or later | +| `lxc-user-nic` | `builtin` | Fast ✅ | Fast ✅ | ❌ | ❌ | Experimental | +| `bypass4netns` | `bypass4netns` | Fast ✅ | Fast ✅ | ✅ | ✅ | **Note:** Not integrated to RootlessKit as it needs a custom seccomp profile | + +For information about troubleshooting specific networking issues, see: + +- [`docker run -p` fails with `cannot expose privileged port`](#docker-run--p-fails-with-cannot-expose-privileged-port) +- [Ping doesn't work](#ping-doesnt-work) +- [`IPAddress` shown in `docker inspect` is unreachable](#ipaddress-shown-in-docker-inspect-is-unreachable) +- [`--net=host` doesn't listen ports on the host network namespace](#--nethost-doesnt-listen-ports-on-the-host-network-namespace) +- [Network is slow](#network-is-slow) +- [`docker run -p` does not propagate source IP addresses](#docker-run--p-does-not-propagate-source-ip-addresses) + +#### `docker run -p` fails with `cannot expose privileged port` + +`docker run -p` fails with this error when a privileged port (< 1024) is specified as the host port. + +```console +$ docker run -p 80:80 nginx:alpine +docker: Error response from daemon: driver failed programming external connectivity on endpoint focused_swanson (9e2e139a9d8fc92b37c36edfa6214a6e986fa2028c0cc359812f685173fa6df7): Error starting userland proxy: error while calling PortManager.AddPort(): cannot expose privileged port 80, you might need to add "net.ipv4.ip_unprivileged_port_start=0" (currently 1024) to /etc/sysctl.conf, or set CAP_NET_BIND_SERVICE on rootlesskit binary, or choose a larger port number (>= 1024): listen tcp 0.0.0.0:80: bind: permission denied. +``` + +When you experience this error, consider using an unprivileged port instead. For example, 8080 instead of 80. + +```console +$ docker run -p 8080:80 nginx:alpine +``` + +To allow exposing privileged ports, see [Exposing privileged ports](./tips.md#exposing-privileged-ports). + +#### Ping doesn't work + +Ping does not work when `/proc/sys/net/ipv4/ping_group_range` is set to `1 0`: + +```console +$ cat /proc/sys/net/ipv4/ping_group_range +1 0 +``` + +For details, see [Routing ping packets](./tips.md#routing-ping-packets). + +#### `IPAddress` shown in `docker inspect` is unreachable + +This is an expected behavior, as the daemon is namespaced inside RootlessKit's +network namespace. Use `docker run -p` instead. + +#### `--net=host` doesn't listen ports on the host network namespace + +This is an expected behavior, as the daemon is namespaced inside RootlessKit's +network namespace. Use `docker run -p` instead. 
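If you need to reach a container's namespaced address anyway, for example while debugging, one option is to run the client command inside RootlessKit's network namespace (a sketch; the container IP `172.17.0.2` and port `80` are placeholders, and the PID file path assumes the default `$XDG_RUNTIME_DIR/docker.pid`):

```console
$ nsenter -U --preserve-credentials -n -t $(cat $XDG_RUNTIME_DIR/docker.pid) \
    curl http://172.17.0.2:80
```

For normal operation, publishing ports with `docker run -p` remains the supported approach.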
+
+#### Network is slow
+
+Docker with rootless mode uses [slirp4netns](https://github.com/rootless-containers/slirp4netns) as the default network stack if slirp4netns v0.4.0 or later is installed.
+If slirp4netns is not installed, Docker falls back to [VPNKit](https://github.com/moby/vpnkit).
+Installing slirp4netns may improve the network throughput.
+
+For more information about network drivers for RootlessKit, see
+[RootlessKit documentation](https://github.com/rootless-containers/rootlesskit/blob/v2.0.0/docs/network.md).
+
+Changing the MTU value may also improve the throughput.
+The MTU value can be specified by creating `~/.config/systemd/user/docker.service.d/override.conf` with the following content:
+
+```systemd
+[Service]
+Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=<MTU>"
+```
+
+And then restart the daemon:
+```console
+$ systemctl --user daemon-reload
+$ systemctl --user restart docker
+```
+
+#### `docker run -p` does not propagate source IP addresses
+
+This is because Docker in rootless mode uses RootlessKit's `builtin` port
+driver by default, which doesn't support source IP propagation. To enable
+source IP propagation, you can:
+
+- Use the `slirp4netns` RootlessKit port driver
+- Use the `pasta` RootlessKit network driver, with the `implicit` port driver
+
+The `pasta` network driver is experimental, but provides improved throughput
+performance compared to the `slirp4netns` port driver. The `pasta` driver
+requires Docker Engine version 25.0 or later.
+
+To change the RootlessKit networking configuration:
+
+1. Create a file at `~/.config/systemd/user/docker.service.d/override.conf`.
+2. Add the following contents, depending on which configuration you would like to use:
+
+   - `slirp4netns`
+
+     ```systemd
+     [Service]
+     Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_NET=slirp4netns"
+     Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=slirp4netns"
+     ```
+
+   - `pasta` network driver with `implicit` port driver
+
+     ```systemd
+     [Service]
+     Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_NET=pasta"
+     Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=implicit"
+     ```
+
+3. Restart the daemon:
+
+   ```console
+   $ systemctl --user daemon-reload
+   $ systemctl --user restart docker
+   ```
+
+For more information about networking options for RootlessKit, see:
+
+- [Network drivers](https://github.com/rootless-containers/rootlesskit/blob/v2.0.0/docs/network.md)
+- [Port drivers](https://github.com/rootless-containers/rootlesskit/blob/v2.0.0/docs/port.md)
+
+### Tips for debugging
+
+**Entering into `dockerd` namespaces**
+
+The `dockerd-rootless.sh` script executes `dockerd` in its own user, mount, and network namespaces.
+
+For debugging, you can enter the namespaces by running
+`nsenter -U --preserve-credentials -n -m -t $(cat $XDG_RUNTIME_DIR/docker.pid)`.
+
+## Uninstall
+
+To remove the systemd service of the Docker daemon, run `dockerd-rootless-setuptool.sh uninstall`:
+
+```console
+$ dockerd-rootless-setuptool.sh uninstall
++ systemctl --user stop docker.service
++ systemctl --user disable docker.service
+Removed /home/testuser/.config/systemd/user/default.target.wants/docker.service.
+[INFO] Uninstalled docker.service
+[INFO] This uninstallation tool does NOT remove Docker binaries and data.
+[INFO] To remove data, run: `/usr/bin/rootlesskit rm -rf /home/testuser/.local/share/docker`
+```
+
+Unset the `PATH` and `DOCKER_HOST` environment variables if you have added them to `~/.bashrc`.
+
+To remove the data directory, run `rootlesskit rm -rf ~/.local/share/docker`.
+ +To remove the binaries, remove `docker-ce-rootless-extras` package if you installed Docker with package managers. +If you installed Docker with https://get.docker.com/rootless ([Install without packages](./_index.md#install)), +remove the binary files under `~/bin`: +```console +$ cd ~/bin +$ rm -f containerd containerd-shim containerd-shim-runc-v2 ctr docker docker-init docker-proxy dockerd dockerd-rootless-setuptool.sh dockerd-rootless.sh rootlesskit rootlesskit-docker-proxy runc vpnkit +``` diff --git a/content/manuals/engine/security/seccomp.md b/content/manuals/engine/security/seccomp.md index 094bdbffe0a..82a2cb65568 100644 --- a/content/manuals/engine/security/seccomp.md +++ b/content/manuals/engine/security/seccomp.md @@ -20,11 +20,10 @@ CONFIG_SECCOMP=y ## Pass a profile for a container -The default `seccomp` profile provides a sane default for running containers with -seccomp and disables around 44 system calls out of 300+. It is moderately -protective while providing wide application compatibility. The default Docker -profile can be found -[here](https://github.com/moby/moby/blob/master/profiles/seccomp/default.json). +The [default `seccomp` profile](https://github.com/moby/profiles/blob/main/seccomp/default.json) +provides a sane default for running containers with seccomp and disables around +44 system calls out of 300+. It is moderately protective while providing wide +application compatibility. In effect, the profile is an allowlist that denies access to system calls by default and then allows specific system calls. The profile works by defining a @@ -72,6 +71,9 @@ the reason each syscall is blocked rather than white-listed. | `init_module` | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`. | | `ioperm` | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`. | | `iopl` | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`. | +| `io_uring_enter` | Blocked due to security vulnerabilities that can be exploited to break out of containers. See [moby/moby#46762](https://github.com/moby/moby/pull/46762). | +| `io_uring_register` | Blocked due to security vulnerabilities that can be exploited to break out of containers. See [moby/moby#46762](https://github.com/moby/moby/pull/46762). | +| `io_uring_setup` | Blocked due to security vulnerabilities that can be exploited to break out of containers. See [moby/moby#46762](https://github.com/moby/moby/pull/46762). | | `kcmp` | Restrict process inspection capabilities, already blocked by dropping `CAP_SYS_PTRACE`. | | `kexec_file_load` | Sister syscall of `kexec_load` that does the same thing, slightly different arguments. Also gated by `CAP_SYS_BOOT`. | | `kexec_load` | Deny loading a new kernel for later execution. Also gated by `CAP_SYS_BOOT`. | @@ -96,6 +98,8 @@ the reason each syscall is blocked rather than white-listed. | `setns` | Deny associating a thread with a namespace. Also gated by `CAP_SYS_ADMIN`. | | `settimeofday` | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`. | | `stime` | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`. | +| `socket` | Blocked for `AF_ALG` to prevent in-container privilege escalation via the kernel cryptographic API ([CVE-2026-31431](https://nvd.nist.gov/vuln/detail/CVE-2026-31431)). Also blocked for `AF_VSOCK`. See [moby/moby#52494](https://github.com/moby/moby/pull/52494). 
| +| `socketcall` | Denied to prevent bypassing socket address family filters on architectures with the legacy `socketcall` multiplexer (i386, s390, MIPS o32). See [moby/moby#52494](https://github.com/moby/moby/pull/52494). | | `swapon` | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`. | | `swapoff` | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`. | | `sysfs` | Obsolete syscall. | diff --git a/content/manuals/engine/security/trust/_index.md b/content/manuals/engine/security/trust/_index.md index 8ac5f3f2192..8c50b075e86 100644 --- a/content/manuals/engine/security/trust/_index.md +++ b/content/manuals/engine/security/trust/_index.md @@ -35,6 +35,16 @@ ensure that the images they pull are signed. Publishers could be individuals or organizations manually signing their content or automated software supply chains signing content as part of their release process. +> [!NOTE] +> +> Docker is retiring DCT for Docker Official Images +> (DOI). You should start planning to transition to a different image signing +> and verification solution (like [Sigstore](https://www.sigstore.dev/) or +> [Notation](https://github.com/notaryproject/notation#readme)). Timelines for the +> complete deprecation of DCT are being finalized and will be published soon. +> +> For more information, see [Retiring Docker Content Trust](https://www.docker.com/blog/retiring-docker-content-trust/). + ### Image tags and DCT An individual image record has the following identifier: @@ -53,7 +63,7 @@ have discretion on which tags they sign. An image repository can contain an image with one tag that is signed and another tag that is not. For example, consider [the Mongo image -repository](https://hub.docker.com/r/library/mongo/tags/). The `latest` +repository](https://hub.docker.com/_/mongo/tags/). The `latest` tag could be unsigned while the `3.1.6` tag could be signed. It is the responsibility of the image publisher to decide if an image tag is signed or not. In this representation, some image tags are signed, others are not: @@ -111,9 +121,19 @@ Within the Docker CLI we can sign and push a container image with the `$ docker trust` command syntax. This is built on top of the Notary feature set. For more information, see the [Notary GitHub repository](https://github.com/theupdateframework/notary). -A prerequisite for signing an image is a Docker Registry with a Notary server -attached (Such as the Docker Hub ). Instructions for -standing up a self-hosted environment can be found [here](/engine/security/trust/deploying_notary/). +A prerequisite for signing an image is a Docker Registry with a Notary server (such as Docker Hub) attached. +Refer to [Deploying Notary](/engine/security/trust/deploying_notary/) for instructions. + +> [!NOTE] +> +> Docker is retiring DCT for Docker Official Images +> (DOI). You should start planning to transition to a different image signing +> and verification solution (like [Sigstore](https://www.sigstore.dev/) or +> [Notation](https://github.com/notaryproject/notation#readme)). Timelines for the +> complete deprecation of DCT are being finalized and will be published soon. +> +> For more information, see [Retiring Docker Content Trust](https://www.docker.com/blog/retiring-docker-content-trust/). + To sign a Docker Image you will need a delegation key pair. 
These keys can be generated locally using `$ docker trust key generate` or generated diff --git a/content/manuals/engine/security/trust/trust_automation.md b/content/manuals/engine/security/trust/trust_automation.md index 311840d8d13..8d06240d7ee 100644 --- a/content/manuals/engine/security/trust/trust_automation.md +++ b/content/manuals/engine/security/trust/trust_automation.md @@ -16,8 +16,8 @@ When working directly with the Notary client, it uses its [own set of environmen ## Add a delegation private key To automate importing a delegation private key to the local Docker trust store, we -need to pass a passphrase for the new key. This passphrase will be required -everytime that delegation signs a tag. +need to pass a passphrase for the new key. This passphrase will be required +every time that delegation signs a tag. ```console $ export DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE="mypassphrase123" diff --git a/content/manuals/engine/security/trust/trust_delegation.md b/content/manuals/engine/security/trust/trust_delegation.md index df4b8d933ad..8c7a003eb3b 100644 --- a/content/manuals/engine/security/trust/trust_delegation.md +++ b/content/manuals/engine/security/trust/trust_delegation.md @@ -26,7 +26,7 @@ same as the registry URL specified in the image tag (following a similar logic t `$ docker push`). When using Docker Hub or DTR, the notary server URL is the same as the registry URL. However, for self-hosted environments or 3rd party registries, you will need to specify an alternative -URL for the notary server. This is done with: +URL of the notary server. This is done with: ```console $ export DOCKER_CONTENT_TRUST_SERVER=https://: @@ -96,8 +96,7 @@ configure the Notary CLI: The newly created configuration file contains information about the location of your local Docker trust data and the notary server URL. For more detailed information about how to use notary outside of the -Docker Content Trust use cases, refer to the Notary CLI documentation -[here](https://github.com/theupdateframework/notary/blob/master/docs/command_reference.md) +Docker Content Trust use cases, refer to the [Notary CLI documentation](https://github.com/theupdateframework/notary/blob/master/docs/command_reference.md) ## Creating delegation keys @@ -189,8 +188,8 @@ jeff 9deed251daa1aa6f9d5f9b752847647cf8d705da When the first delegation is added to the Notary Server using `$ docker trust`, we automatically initiate trust data for the repository. This includes creating the notary target and snapshots keys, and rotating the snapshot key to be -managed by the notary server. More information on these keys can be found -[here](trust_key_mng.md) +managed by the notary server. More information on these keys can be found in +[Manage keys for content trust](trust_key_mng.md). When initiating a repository, you will need the key and the passphrase of a local Notary Canonical Root Key. If you have not initiated a repository before, and @@ -361,7 +360,7 @@ Successfully removed ben from registry.example.com/admin/demo ``` 2) If you have added additional delegations already and are seeing an error - message that there are no valid signatures in `targest/releases`, you will need + message that there are no valid signatures in `targets/releases`, you will need to resign the `targets/releases` delegation file with the Notary CLI. 
```text @@ -374,8 +373,8 @@ Successfully removed ben from registry.example.com/admin/demo $ notary witness registry.example.com/admin/demo targets/releases --publish ``` - More information on the `$ notary witness` command can be found - [here](https://github.com/theupdateframework/notary/blob/master/docs/advanced_usage.md#recovering-a-delegation) + For more information on the `notary witness` command, refer to the + [Notary client advanced usage guide](https://github.com/theupdateframework/notary/blob/master/docs/advanced_usage.md#recovering-a-delegation) ### Removing a contributor's key from a delegation diff --git a/content/manuals/engine/security/trust/trust_sandbox.md b/content/manuals/engine/security/trust/trust_sandbox.md index 8b159b8691c..5da8d932359 100644 --- a/content/manuals/engine/security/trust/trust_sandbox.md +++ b/content/manuals/engine/security/trust/trust_sandbox.md @@ -51,7 +51,7 @@ sandbox is configured to store all the keys and files inside the `trustsandbox` container. Since the keys you create in the sandbox are for play only, destroying the container destroys them as well. -By using a docker-in-docker image for the `trustsandbox` container, you also +By using a `docker-in-docker` image for the `trustsandbox` container, you also don't pollute your real Docker daemon cache with any images you push and pull. The images are stored in an anonymous volume attached to this container, and can be destroyed after you destroy the container. @@ -64,61 +64,68 @@ the `trustsandbox` container, the Notary server, and the Registry server. 1. Create a new `trustsandbox` directory and change into it. - $ mkdir trustsandbox - $ cd trustsandbox + ```console + $ mkdir trustsandbox + $ cd trustsandbox + ``` 2. Create a file called `compose.yaml` with your favorite editor. For example, using vim: - $ touch compose.yaml - $ vim compose.yaml + ```console + $ touch compose.yaml + $ vim compose.yaml + ``` 3. Add the following to the new file. 
- version: "2" - services: - notaryserver: - image: dockersecurity/notary_autobuilds:server-v0.5.1 - volumes: - - notarycerts:/var/lib/notary/fixtures - networks: - - sandbox - environment: - - NOTARY_SERVER_STORAGE_TYPE=memory - - NOTARY_SERVER_TRUST_SERVICE_TYPE=local - sandboxregistry: - image: registry:2.4.1 - networks: - - sandbox - container_name: sandboxregistry - trustsandbox: - image: docker:dind - networks: - - sandbox - volumes: - - notarycerts:/notarycerts - privileged: true - container_name: trustsandbox - entrypoint: "" - command: |- - sh -c ' - cp /notarycerts/root-ca.crt /usr/local/share/ca-certificates/root-ca.crt && - update-ca-certificates && - dockerd-entrypoint.sh --insecure-registry sandboxregistry:5000' - volumes: - notarycerts: - external: false - networks: - sandbox: - external: false - + ```yaml + version: "2" + services: + notaryserver: + image: dockersecurity/notary_autobuilds:server-v0.5.1 + volumes: + - notarycerts:/var/lib/notary/fixtures + networks: + - sandbox + environment: + - NOTARY_SERVER_STORAGE_TYPE=memory + - NOTARY_SERVER_TRUST_SERVICE_TYPE=local + sandboxregistry: + image: registry:3 + networks: + - sandbox + container_name: sandboxregistry + trustsandbox: + image: docker:dind + networks: + - sandbox + volumes: + - notarycerts:/notarycerts + privileged: true + container_name: trustsandbox + entrypoint: "" + command: |- + sh -c ' + cp /notarycerts/root-ca.crt /usr/local/share/ca-certificates/root-ca.crt && + update-ca-certificates && + dockerd-entrypoint.sh --insecure-registry sandboxregistry:5000' + volumes: + notarycerts: + external: false + networks: + sandbox: + external: false + ``` 4. Save and close the file. 5. Run the containers on your local system. - $ docker compose up -d + ```console + $ docker compose up -d + ``` - The first time you run this, the docker-in-docker, Notary server, and registry - images are downloaded from Docker Hub. + The first time you run this, the `docker-in-docker`, Notary server, and registry + images are downloaded from Docker Hub. ## Play in the sandbox @@ -127,8 +134,10 @@ Now that everything is setup, you can go into your `trustsandbox` container and start testing Docker content trust. From your host machine, obtain a shell in the `trustsandbox` container. - $ docker container exec -it trustsandbox sh - / # +```console +$ docker container exec -it trustsandbox sh +/ # +``` ### Test some trust operations @@ -136,59 +145,68 @@ Now, pull some images from within the `trustsandbox` container. 1. Download a `docker` image to test with. - / # docker pull docker/trusttest - docker pull docker/trusttest - Using default tag: latest - latest: Pulling from docker/trusttest - - b3dbab3810fc: Pull complete - a9539b34a6ab: Pull complete - Digest: sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a - Status: Downloaded newer image for docker/trusttest:latest + ```console + / # docker pull docker/trusttest + docker pull docker/trusttest + Using default tag: latest + latest: Pulling from docker/trusttest + b3dbab3810fc: Pull complete + a9539b34a6ab: Pull complete + Digest: sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a + Status: Downloaded newer image for docker/trusttest:latest + ``` -2. Tag it to be pushed to our sandbox registry: +2. Tag it to be pushed to your sandbox registry: - / # docker tag docker/trusttest sandboxregistry:5000/test/trusttest:latest + ```console + / # docker tag docker/trusttest sandboxregistry:5000/test/trusttest:latest + ``` 3. Enable content trust. 
- / # export DOCKER_CONTENT_TRUST=1 + ```console + / # export DOCKER_CONTENT_TRUST=1 + ``` 4. Identify the trust server. - / # export DOCKER_CONTENT_TRUST_SERVER=https://notaryserver:4443 + ```console + / # export DOCKER_CONTENT_TRUST_SERVER=https://notaryserver:4443 + ``` This step is only necessary because the sandbox is using its own server. Normally, if you are using the Docker Public Hub this step isn't necessary. 5. Pull the test image. - / # docker pull sandboxregistry:5000/test/trusttest - Using default tag: latest - Error: remote trust data does not exist for sandboxregistry:5000/test/trusttest: notaryserver:4443 does not have trust data for sandboxregistry:5000/test/trusttest - + ```console + / # docker pull sandboxregistry:5000/test/trusttest + Using default tag: latest + Error: remote trust data does not exist for sandboxregistry:5000/test/trusttest: notaryserver:4443 does not have trust data for sandboxregistry:5000/test/trusttest + ``` You see an error, because this content doesn't exist on the `notaryserver` yet. 6. Push and sign the trusted image. - - / # docker push sandboxregistry:5000/test/trusttest:latest - The push refers to a repository [sandboxregistry:5000/test/trusttest] - 5f70bf18a086: Pushed - c22f7bc058a9: Pushed - latest: digest: sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926 size: 734 - Signing and pushing trust metadata - You are about to create a new root signing key passphrase. This passphrase - will be used to protect the most sensitive key in your signing system. Please - choose a long, complex passphrase and be careful to keep the password and the - key file itself secure and backed up. It is highly recommended that you use a - password manager to generate the passphrase and keep it safe. There will be no - way to recover this key. You can find the key in your config directory. - Enter passphrase for new root key with ID 27ec255: - Repeat passphrase for new root key with ID 27ec255: - Enter passphrase for new repository key with ID 58233f9 (sandboxregistry:5000/test/trusttest): - Repeat passphrase for new repository key with ID 58233f9 (sandboxregistry:5000/test/trusttest): - Finished initializing "sandboxregistry:5000/test/trusttest" - Successfully signed "sandboxregistry:5000/test/trusttest":latest + ```console + / # docker push sandboxregistry:5000/test/trusttest:latest + The push refers to a repository [sandboxregistry:5000/test/trusttest] + 5f70bf18a086: Pushed + c22f7bc058a9: Pushed + latest: digest: sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926 size: 734 + Signing and pushing trust metadata + You are about to create a new root signing key passphrase. This passphrase + will be used to protect the most sensitive key in your signing system. Please + choose a long, complex passphrase and be careful to keep the password and the + key file itself secure and backed up. It is highly recommended that you use a + password manager to generate the passphrase and keep it safe. There will be no + way to recover this key. You can find the key in your config directory. 
+   Enter passphrase for new root key with ID 27ec255:
+   Repeat passphrase for new root key with ID 27ec255:
+   Enter passphrase for new repository key with ID 58233f9 (sandboxregistry:5000/test/trusttest):
+   Repeat passphrase for new repository key with ID 58233f9 (sandboxregistry:5000/test/trusttest):
+   Finished initializing "sandboxregistry:5000/test/trusttest"
+   Successfully signed "sandboxregistry:5000/test/trusttest":latest
+   ```
 
    Because you are pushing this repository for the first time, Docker creates
    new root and repository keys and asks you for passphrases with which to
@@ -197,13 +215,15 @@ Now, pull some images from within the `trustsandbox` container.
 
 7. Try pulling the image you just pushed:
 
-        / # docker pull sandboxregistry:5000/test/trusttest
-        Using default tag: latest
-        Pull (1 of 1): sandboxregistry:5000/test/trusttest:latest@sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926
-        sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926: Pulling from test/trusttest
-        Digest: sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926
-        Status: Downloaded newer image for sandboxregistry:5000/test/trusttest@sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926
-        Tagging sandboxregistry:5000/test/trusttest@sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926 as sandboxregistry:5000/test/trusttest:latest
+   ```console
+   / # docker pull sandboxregistry:5000/test/trusttest
+   Using default tag: latest
+   Pull (1 of 1): sandboxregistry:5000/test/trusttest:latest@sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926
+   sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926: Pulling from test/trusttest
+   Digest: sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926
+   Status: Downloaded newer image for sandboxregistry:5000/test/trusttest@sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926
+   Tagging sandboxregistry:5000/test/trusttest@sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926 as sandboxregistry:5000/test/trusttest:latest
+   ```
 
 ### Test with malicious images
 
@@ -212,15 +232,17 @@ What happens when data is corrupted and you try to pull it when trust is
 enabled? In this section, you go into the `sandboxregistry` and tamper with some
 data. Then, you try and pull it.
 
-1. Leave the `trustsandbox` shell and container running.
+1. Leave the `trustsandbox` shell and container running.
 
-2. Open a new interactive terminal from your host, and obtain a shell into the
-   `sandboxregistry` container.
+2. Open a new interactive terminal from your host, and obtain a shell into the
+   `sandboxregistry` container.
 
-        $ docker container exec -it sandboxregistry bash
-        root@65084fc6f047:/#
+   ```console
+   $ docker container exec -it sandboxregistry bash
+   root@65084fc6f047:/#
+   ```
 
-3. List the layers for the `test/trusttest` image you pushed:
+3. List the layers for the `test/trusttest` image you pushed:
 
    ```console
   root@65084fc6f047:/# ls -l /var/lib/registry/docker/registry/v2/repositories/test/trusttest/_layers/sha256
@@ -230,51 +252,61 @@ data. Then, you try and pull it.
   drwxr-xr-x 2 root root 4096 Jun 10 17:26 cc7629d1331a7362b5e5126beb5bf15ca0bf67eb41eab994c719a45de53255cd
   ```
 
-4. 
Change into the registry storage for one of those layers (this is in a different directory): - root@65084fc6f047:/# cd /var/lib/registry/docker/registry/v2/blobs/sha256/aa/aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042 + ```console + root@65084fc6f047:/# cd /var/lib/registry/docker/registry/v2/blobs/sha256/aa/aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042 + ``` -5. Add malicious data to one of the `trusttest` layers: +5. Add malicious data to one of the `trusttest` layers: - root@65084fc6f047:/# echo "Malicious data" > data + ```console + root@65084fc6f047:/# echo "Malicious data" > data + ``` -6. Go back to your `trustsandbox` terminal. +6. Go back to your `trustsandbox` terminal. -7. List the `trusttest` image. +7. List the `trusttest` image. - / # docker image ls | grep trusttest - REPOSITORY TAG IMAGE ID CREATED SIZE - docker/trusttest latest cc7629d1331a 11 months ago 5.025 MB - sandboxregistry:5000/test/trusttest latest cc7629d1331a 11 months ago 5.025 MB - sandboxregistry:5000/test/trusttest cc7629d1331a 11 months ago 5.025 MB + ```console + / # docker image ls | grep trusttest + REPOSITORY TAG IMAGE ID CREATED SIZE + docker/trusttest latest cc7629d1331a 11 months ago 5.025 MB + sandboxregistry:5000/test/trusttest latest cc7629d1331a 11 months ago 5.025 MB + sandboxregistry:5000/test/trusttest cc7629d1331a 11 months ago 5.025 MB + ``` -8. Remove the `trusttest:latest` image from our local cache. +8. Remove the `trusttest:latest` image from your local cache. - / # docker image rm -f cc7629d1331a - Untagged: docker/trusttest:latest - Untagged: sandboxregistry:5000/test/trusttest:latest - Untagged: sandboxregistry:5000/test/trusttest@sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926 - Deleted: sha256:cc7629d1331a7362b5e5126beb5bf15ca0bf67eb41eab994c719a45de53255cd - Deleted: sha256:2a1f6535dc6816ffadcdbe20590045e6cbf048d63fd4cc753a684c9bc01abeea - Deleted: sha256:c22f7bc058a9a8ffeb32989b5d3338787e73855bf224af7aa162823da015d44c + ```console + / # docker image rm -f cc7629d1331a + Untagged: docker/trusttest:latest + Untagged: sandboxregistry:5000/test/trusttest:latest + Untagged: sandboxregistry:5000/test/trusttest@sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926 + Deleted: sha256:cc7629d1331a7362b5e5126beb5bf15ca0bf67eb41eab994c719a45de53255cd + Deleted: sha256:2a1f6535dc6816ffadcdbe20590045e6cbf048d63fd4cc753a684c9bc01abeea + Deleted: sha256:c22f7bc058a9a8ffeb32989b5d3338787e73855bf224af7aa162823da015d44c + ``` - Docker does not re-download images that it already has cached, but we want - Docker to attempt to download the tampered image from the registry and reject - it because it is invalid. + Docker does not re-download images that it already has cached, but you want + Docker to attempt to download the tampered image from the registry and reject + it because it is invalid. -9. Pull the image again. This downloads the image from the registry, because we don't have it cached. +9. Pull the image again. This downloads the image from the registry, because you don't have it cached. 
- / # docker pull sandboxregistry:5000/test/trusttest - Using default tag: latest - Pull (1 of 1): sandboxregistry:5000/test/trusttest:latest@sha256:35d5bc26fd358da8320c137784fe590d8fcf9417263ef261653e8e1c7f15672e - sha256:35d5bc26fd358da8320c137784fe590d8fcf9417263ef261653e8e1c7f15672e: Pulling from test/trusttest + ```console + / # docker pull sandboxregistry:5000/test/trusttest + Using default tag: latest + Pull (1 of 1): sandboxregistry:5000/test/trusttest:latest@sha256:35d5bc26fd358da8320c137784fe590d8fcf9417263ef261653e8e1c7f15672e + sha256:35d5bc26fd358da8320c137784fe590d8fcf9417263ef261653e8e1c7f15672e: Pulling from test/trusttest - aac0c133338d: Retrying in 5 seconds - a3ed95caeb02: Download complete - error pulling image configuration: unexpected EOF + aac0c133338d: Retrying in 5 seconds + a3ed95caeb02: Download complete + error pulling image configuration: unexpected EOF + ``` - The pull did not complete because the trust system couldn't verify the - image. + The pull did not complete because the trust system couldn't verify the + image. ## More play in the sandbox @@ -289,4 +321,6 @@ When you are done, and want to clean up all the services you've started and any anonymous volumes that have been created, just run the following command in the directory where you've created your Docker Compose file: - $ docker compose down -v + ```console + $ docker compose down -v + ``` diff --git a/content/manuals/engine/security/userns-remap.md b/content/manuals/engine/security/userns-remap.md index 57dfe29986b..c04d8afcb50 100644 --- a/content/manuals/engine/security/userns-remap.md +++ b/content/manuals/engine/security/userns-remap.md @@ -16,13 +16,19 @@ can re-map this user to a less-privileged user on the Docker host. The mapped user is assigned a range of UIDs which function within the namespace as normal UIDs from 0 to 65536, but have no privileges on the host machine itself. +> [!NOTE] +> +> With `userns-remap`, the Docker daemon still runs as root. To run both the +> daemon and containers without root privileges, see [Rootless mode](rootless/_index.md) +> instead. + ## About remapping and subordinate user and group IDs The remapping itself is handled by two files: `/etc/subuid` and `/etc/subgid`. Each file works the same, but one is concerned with the user ID range, and the other with the group ID range. Consider the following entry in `/etc/subuid`: -```none +```text testuser:231072:65536 ``` @@ -50,7 +56,7 @@ purpose. > [!WARNING] > > Some distributions do not automatically add the new group to the -> `/etc/subuid` and `/etc/subgid` files. If that's the case, you are may have +> `/etc/subuid` and `/etc/subgid` files. If that's the case, you may have > to manually edit these files and assign non-overlapping ranges. This step is > covered in [Prerequisites](#prerequisites). @@ -93,7 +99,7 @@ avoid these situations. and a maximum number of UIDs or GIDs available to the user. For instance, given the following entry: - ```none + ```text testuser:231072:65536 ``` diff --git a/content/manuals/engine/storage/_index.md b/content/manuals/engine/storage/_index.md index d616e302867..d1ed0a9ee8b 100644 --- a/content/manuals/engine/storage/_index.md +++ b/content/manuals/engine/storage/_index.md @@ -8,6 +8,22 @@ aliases: - /storage/ --- +Docker storage covers two different concepts: + +**Container data persistence** (this page): How to store application data +outside containers using volumes, bind mounts, and tmpfs mounts. This data +persists independently of container lifecycle. 
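To make the distinction drawn in the paragraph above concrete, here is a minimal sketch of the three persistence options it names. The image name, container names, and paths are illustrative placeholders, not taken from the pages being changed:

```console
# Named volume: created and managed by Docker, survives container removal
$ docker run -d --name db --mount type=volume,src=app-data,dst=/var/lib/data nginx:alpine

# Bind mount: exposes an existing host directory inside the container as-is
$ docker run -d --name web --mount type=bind,src="$(pwd)"/site,dst=/usr/share/nginx/html,readonly nginx:alpine

# tmpfs mount: in-memory only, gone when the container stops
$ docker run -d --name scratch --mount type=tmpfs,dst=/cache nginx:alpine
```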
+ +**Daemon storage backends** ([containerd image store](containerd.md) and +[storage drivers](drivers/)): How the daemon stores image layers and container +writable layers on disk. + +This page focuses on container data persistence. For information about how +Docker stores images and container layers, see +[containerd image store](containerd.md) or [Storage drivers](drivers/). + +## Container layer basics + By default all files created inside a container are stored on a writable container layer that sits on top of the read-only, immutable image layers. @@ -78,9 +94,13 @@ Docker Engine API using a named pipe. ## Next steps -- Learn more about [volumes](./volumes.md). -- Learn more about [bind mounts](./bind-mounts.md). -- Learn more about [tmpfs mounts](./tmpfs.md). -- Learn more about [storage drivers](/engine/storage/drivers/), which - are not related to bind mounts or volumes, but allow you to store data in a - container's writable layer. +Learn more about container data persistence: + +- [Volumes](./volumes.md) +- [Bind mounts](./bind-mounts.md) +- [tmpfs mounts](./tmpfs.md) + +Learn more about daemon storage backends: + +- [containerd image store](containerd.md) +- [Storage drivers](drivers/) diff --git a/content/manuals/engine/storage/bind-mounts.md b/content/manuals/engine/storage/bind-mounts.md index 4b5d07c09d9..f55e2bb8314 100644 --- a/content/manuals/engine/storage/bind-mounts.md +++ b/content/manuals/engine/storage/bind-mounts.md @@ -10,8 +10,9 @@ aliases: When you use a bind mount, a file or directory on the host machine is mounted from the host into a container. By contrast, when you use a volume, a new -directory is created within Docker's storage directory on the host machine, and -Docker manages that directory's contents. +directory is created within Docker's storage directory on the host machine. +Docker creates and maintains this storage location, but containers access it +directly using standard filesystem operations. ## When to use bind mounts @@ -87,7 +88,7 @@ If you use `--volume` to bind-mount a file or directory that does not yet exist on the Docker host, Docker automatically creates the directory on the host for you. It's always created as a directory. -`--mount` does not automatically create a directory if the specified mount +By default, `--mount` does not automatically create a directory if the specified mount path does not exist on the host. Instead, it produces an error: ```console @@ -95,6 +96,13 @@ $ docker run --mount type=bind,src=/dev/noexist,dst=/mnt/foo alpine docker: Error response from daemon: invalid mount config for type "bind": bind source path does not exist: /dev/noexist. ``` +You can use the `bind-create-src` option to automatically create the source directory +on the host if it doesn't exist: + +```console +$ docker run --mount type=bind,src=/home/user/mydir,dst=/mnt/foo,bind-create-src alpine +``` + ### Options for --mount The `--mount` flag consists of multiple key-value pairs, separated by commas @@ -107,12 +115,13 @@ $ docker run --mount type=bind,src=,dst=[,=}} +The containerd image store is the default storage backend for Docker Engine +29.0 and later on fresh installations. If you upgraded from an earlier version, +your daemon continues using the legacy graph drivers (overlay2) until you +enable the containerd image store. containerd, the industry-standard container runtime, uses snapshotters instead -of the classic storage drivers for storing image and container data. 
-While the `overlay2` driver still remains the default driver for Docker Engine,
-you can opt in to using containerd snapshotters as an experimental feature.
+of classic storage drivers for storing image and container data.
 
-To learn more about the containerd image store and its benefits, refer to
-[containerd image store on Docker Desktop](/manuals/desktop/features/containerd.md).
+> [!NOTE]
+> The containerd image store is not available when using user namespace
+> remapping (`userns-remap`). See
+> [moby#47377](https://github.com/moby/moby/issues/47377) for details.
+
+## Why use the containerd image store
+
+The containerd image store uses snapshotters to manage how image layers are
+stored and accessed on the filesystem. This differs from the classic graph
+drivers like overlay2.
+
+The containerd image store enables:
+
+- Building and storing multi-platform images locally. With classic storage
+  drivers, you need external builders for multi-platform images.
+- Working with images that include attestations (provenance, SBOM). These use
+  image indices that the classic store doesn't support.
+- Running Wasm containers. The containerd image store supports WebAssembly
+  workloads.
+- Using advanced snapshotters. containerd supports pluggable snapshotters that
+  provide features like lazy-pulling of images (stargz) or peer-to-peer image
+  distribution (nydus, dragonfly).
+
+For most users, switching to the containerd image store is transparent. The
+storage backend changes, but your workflows remain the same.
+
+## Disk space usage
+
+The containerd image store uses more disk space than the legacy storage
+drivers for the same images. This is because containerd stores images in both
+compressed and uncompressed formats, while the legacy drivers stored only the
+uncompressed layers.
+
+When you pull an image, containerd keeps the compressed layers (as received
+from the registry) and also extracts them to disk. This dual storage means
+each layer occupies more space. The compressed format enables faster pulls and
+pushes, but requires additional disk capacity.
+
+This difference is particularly noticeable with multiple images sharing the
+same base layers. With legacy storage drivers, shared base layers were stored
+once locally, and reused by the images that depended on them. With containerd, each
+image stores its own compressed version of shared layers, even though the
+uncompressed layers are still de-duplicated through snapshotters. The
+compressed storage adds overhead proportional to the number of images using
+those layers.
+
+If disk space is constrained, consider the following:
+
+- Regularly prune unused images with `docker image prune`
+- Use `docker system df` to monitor disk usage
+- [Configure the data directory](../daemon/_index.md#configure-the-data-directory-location)
+  to use a partition with adequate space
 
 ## Enable containerd image store on Docker Engine
 
-Switching to containerd snapshotters causes you to temporarily lose images and
-containers created using the classic storage drivers.
-Those resources still exist on your filesystem, and you can retrieve them by
-turning off the containerd snapshotters feature.
+If you're upgrading from an earlier Docker Engine version, you need to manually
+enable the containerd image store.
 
-The following steps explain how to enable the containerd snapshotters feature.
+> [!IMPORTANT]
+> Switching storage backends temporarily hides images and containers created
+> with the other backend. Your data remains on disk. 
To access the old images +> again, switch back to your previous storage configuration. -1. Add the following configuration to your `/etc/docker/daemon.json` - configuration file: +Add the following configuration to your `/etc/docker/daemon.json` file: - ```json - { - "features": { - "containerd-snapshotter": true - } - } - ``` +```json +{ + "features": { + "containerd-snapshotter": true + } +} +``` -2. Save the file. -3. Restart the daemon for the changes to take effect. +Save the file and restart the daemon: - ```console - $ sudo systemctl restart docker - ``` +```console +$ sudo systemctl restart docker +``` -After restarting the daemon, running `docker info` shows that you're using -containerd snapshotter storage drivers. +After restarting the daemon, verify you're using the containerd image store: ```console $ docker info -f '{{ .DriverStatus }}' @@ -54,3 +103,58 @@ $ docker info -f '{{ .DriverStatus }}' ``` Docker Engine uses the `overlayfs` containerd snapshotter by default. + +> [!NOTE] +> When you enable the containerd image store, existing images and containers +> from the overlay2 driver remain on disk but become hidden. They reappear if +> you switch back to overlay2. To use your existing images with the containerd +> image store, push them to a registry first, or use `docker save` to export +> them. + +## Experimental automatic migration + +Docker Engine includes an experimental feature that can automatically switch to +the containerd image store under certain conditions. **This feature is +experimental**. It's provided for those who want to test it, but [starting +fresh](#enable-containerd-image-store-on-docker-engine) is the recommended +approach. + +> [!CAUTION] +> The automatic migration feature is experimental and may not work reliably in +> all scenarios. Create backups before attempting to use it. + +To enable automatic migration, add the `containerd-migration` feature to your +`/etc/docker/daemon.json`: + +```json +{ + "features": { + "containerd-migration": true + } +} +``` + +You can also set the `DOCKER_MIGRATE_SNAPSHOTTER_THRESHOLD` environment +variable to make the daemon switch automatically if you have no containers and +your image count is at or below the threshold. For systemd: + +```console +$ sudo systemctl edit docker.service +``` + +Add: + +```ini +[Service] +Environment="DOCKER_MIGRATE_SNAPSHOTTER_THRESHOLD=5" +``` + +If you have no running or stopped containers and 5 or fewer images, the daemon +switches to the containerd image store on restart. Your overlay2 data remains +on disk but becomes hidden. + +## Additional resources + +To learn more about the containerd image store and its capabilities in Docker +Desktop, see +[containerd image store on Docker Desktop](/manuals/desktop/features/containerd.md). diff --git a/content/manuals/engine/storage/drivers/_index.md b/content/manuals/engine/storage/drivers/_index.md index c6241730936..1bef91efea8 100644 --- a/content/manuals/engine/storage/drivers/_index.md +++ b/content/manuals/engine/storage/drivers/_index.md @@ -1,6 +1,6 @@ --- description: Learn the technologies that support storage drivers. -keywords: container, storage, driver, btrfs, overlayfs, vfs, zfs +keywords: container, storage, driver, btrfs, overlayfs, vfs, zfs, containerd title: Storage drivers weight: 40 aliases: @@ -9,6 +9,16 @@ aliases: - /engine/userguide/storagedriver/imagesandcontainers/ --- +> [!NOTE] +> Docker Engine 29.0 and later uses the +> [containerd image store](../containerd.md) by default for fresh installations. 
+> The containerd image store uses snapshotters instead of the classic storage +> drivers described on this page. If you're running a fresh installation of +> Docker Engine 29.0 or later, or if you've migrated to the containerd image +> store, this page provides background on how image layers work but the +> implementation details differ. For information about the containerd image +> store, see [containerd image store](../containerd.md). + To use storage drivers effectively, it's important to know how Docker builds and stores images, and how these images are used by containers. You can use this information to make informed choices about the best way to persist data from diff --git a/content/manuals/engine/storage/drivers/btrfs-driver.md b/content/manuals/engine/storage/drivers/btrfs-driver.md index 720f6d59be8..ecfa3179ea3 100644 --- a/content/manuals/engine/storage/drivers/btrfs-driver.md +++ b/content/manuals/engine/storage/drivers/btrfs-driver.md @@ -6,6 +6,15 @@ aliases: - /storage/storagedriver/btrfs-driver/ --- +> [!IMPORTANT] +> +> In most cases you should use the `overlay2` storage driver - it's not +> required to use the `btrfs` storage driver simply because your system uses +> Btrfs as its root filesystem. +> +> Btrfs driver has known issues. See [Moby issue #27653](https://github.com/moby/moby/issues/27653) +> for more information. + Btrfs is a copy-on-write filesystem that supports many advanced storage technologies, making it a good fit for Docker. Btrfs is included in the mainline Linux kernel. diff --git a/content/manuals/engine/storage/drivers/device-mapper-driver.md b/content/manuals/engine/storage/drivers/device-mapper-driver.md index 7eb9de9bb6a..c9aa3f7d655 100644 --- a/content/manuals/engine/storage/drivers/device-mapper-driver.md +++ b/content/manuals/engine/storage/drivers/device-mapper-driver.md @@ -297,7 +297,7 @@ assumes that the Docker daemon is in the `stopped` state. The example below adds 20% more capacity when the disk usage reaches 80%. - ```none + ```text activation { thin_pool_autoextend_threshold=80 thin_pool_autoextend_percent=20 diff --git a/content/manuals/engine/storage/drivers/overlayfs-driver.md b/content/manuals/engine/storage/drivers/overlayfs-driver.md index 5064fb0c32b..cdec2f68258 100644 --- a/content/manuals/engine/storage/drivers/overlayfs-driver.md +++ b/content/manuals/engine/storage/drivers/overlayfs-driver.md @@ -12,13 +12,18 @@ This page refers to the Linux kernel driver as `OverlayFS` and to the Docker storage driver as `overlay2`. > [!NOTE] -> +> Docker Engine 29.0 and later uses the +> [containerd image store](/manuals/engine/storage/containerd.md) by default. +> The `overlay2` driver is a legacy storage driver that is superseded by the +> `overlayfs` containerd snapshotter. For more information, see +> [Select a storage driver](/manuals/engine/storage/drivers/select-storage-driver.md). + +> [!NOTE] > For `fuse-overlayfs` driver, check [Rootless mode documentation](/manuals/engine/security/rootless.md). ## Prerequisites -OverlayFS is the recommended storage driver, and supported if you meet the following -prerequisites: +The `overlay2` driver is supported if you meet the following prerequisites: - Version 4.0 or higher of the Linux kernel, or RHEL or CentOS using version 3.10.0-514 of the kernel or higher. @@ -219,7 +224,7 @@ the image's top layer plus a new directory for the container. The image's layers are the `lowerdirs` in the overlay and are read-only. The new directory for the container is the `upperdir` and is writable. 
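As a quick way to see the `lowerdir` and `upperdir` described in the paragraph above on a real host, the following sketch shows where Docker reports them. It assumes the daemon still uses the classic `overlay2` graph driver rather than the containerd image store, and the container name is an arbitrary placeholder:

```console
# Start a throwaway container
$ docker run -d --name overlay-demo alpine sleep 1d

# LowerDir, UpperDir, MergedDir, and WorkDir for this container
$ docker inspect --format '{{ json .GraphDriver.Data }}' overlay-demo

# The corresponding overlay mount is also visible in the host's mount table
$ mount | grep overlay
```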
-### Image and container layers on-disk +### Image and container layers on-disk (legacy overlay driver) The following `docker pull` command shows a Docker host downloading a Docker image comprising five layers. @@ -437,25 +442,25 @@ filesystems: [`open(2)`](https://linux.die.net/man/2/open) : OverlayFS only implements a subset of the POSIX standards. - This can result in certain OverlayFS operations breaking POSIX standards. One - such operation is the copy-up operation. Suppose that your application calls - `fd1=open("foo", O_RDONLY)` and then `fd2=open("foo", O_RDWR)`. In this case, - your application expects `fd1` and `fd2` to refer to the same file. However, due - to a copy-up operation that occurs after the second calling to `open(2)`, the - descriptors refer to different files. The `fd1` continues to reference the file - in the image (`lowerdir`) and the `fd2` references the file in the container - (`upperdir`). A workaround for this is to `touch` the files which causes the - copy-up operation to happen. All subsequent `open(2)` operations regardless of - read-only or read-write access mode reference the file in the - container (`upperdir`). - - `yum` is known to be affected unless the `yum-plugin-ovl` package is installed. - If the `yum-plugin-ovl` package is not available in your distribution such as - RHEL/CentOS prior to 6.8 or 7.2, you may need to run `touch /var/lib/rpm/*` - before running `yum install`. This package implements the `touch` workaround - referenced above for `yum`. +This can result in certain OverlayFS operations breaking POSIX standards. One +such operation is the copy-up operation. Suppose that your application calls +`fd1=open("foo", O_RDONLY)` and then `fd2=open("foo", O_RDWR)`. In this case, +your application expects `fd1` and `fd2` to refer to the same file. However, due +to a copy-up operation that occurs after the second calling to `open(2)`, the +descriptors refer to different files. The `fd1` continues to reference the file +in the image (`lowerdir`) and the `fd2` references the file in the container +(`upperdir`). A workaround for this is to `touch` the files which causes the +copy-up operation to happen. All subsequent `open(2)` operations regardless of +read-only or read-write access mode reference the file in the +container (`upperdir`). + +`yum` is known to be affected unless the `yum-plugin-ovl` package is installed. +If the `yum-plugin-ovl` package is not available in your distribution such as +RHEL/CentOS prior to 6.8 or 7.2, you may need to run `touch /var/lib/rpm/*` +before running `yum install`. This package implements the `touch` workaround +referenced above for `yum`. [`rename(2)`](https://linux.die.net/man/2/rename) : OverlayFS does not fully support the `rename(2)` system call. Your - application needs to detect its failure and fall back to a "copy and unlink" - strategy. +application needs to detect its failure and fall back to a "copy and unlink" +strategy. diff --git a/content/manuals/engine/storage/drivers/select-storage-driver.md b/content/manuals/engine/storage/drivers/select-storage-driver.md index fe441ce5fc8..0367e6456fd 100644 --- a/content/manuals/engine/storage/drivers/select-storage-driver.md +++ b/content/manuals/engine/storage/drivers/select-storage-driver.md @@ -2,7 +2,7 @@ title: Select a storage driver weight: 10 description: Learn how to select the proper storage driver for your container. 
-keywords: container, storage, driver, btrfs, zfs, overlay, overlay2 +keywords: container, storage, driver, btrfs, zfs, overlay, overlay2, containerd aliases: - /storage/storagedriver/selectadriver/ - /storage/storagedriver/select-storage-driver/ @@ -13,6 +13,14 @@ use Docker volumes to write data. However, some workloads require you to be able to write to the container's writable layer. This is where storage drivers come in. +> [!NOTE] +> Docker Engine 29.0 and later uses the +> [containerd image store](../containerd.md) by default for fresh installations. +> If you upgraded from an earlier version, your daemon continues using the +> classic storage drivers described on this page. You can migrate to the +> containerd image store following the instructions in the +> [containerd image store](../containerd.md) documentation. + Docker supports several storage drivers, using a pluggable architecture. The storage driver controls how images and containers are stored and managed on your Docker host. After you have read the [storage driver overview](./_index.md), the @@ -25,21 +33,22 @@ driver with the best overall performance and stability in the most usual scenari > storage driver is windowsfilter. For more information, see > [windowsfilter](windowsfilter-driver.md). -The Docker Engine provides the following storage drivers on Linux: +The Docker Engine provides the following storage backends on Linux: -| Driver | Description | -| :---------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `overlay2` | `overlay2` is the preferred storage driver for all currently supported Linux distributions, and requires no extra configuration. | -| `fuse-overlayfs` | `fuse-overlayfs`is preferred only for running Rootless Docker on an old host that does not provide support for rootless `overlay2`. The `fuse-overlayfs` driver does not need to be used since Linux kernel 5.11, and `overlay2` works even in rootless mode. Refer to the [rootless mode documentation](/manuals/engine/security/rootless.md) for details. | -| `btrfs` and `zfs` | The `btrfs` and `zfs` storage drivers allow for advanced options, such as creating "snapshots", but require more maintenance and setup. Each of these relies on the backing filesystem being configured correctly. | -| `vfs` | The `vfs` storage driver is intended for testing purposes, and for situations where no copy-on-write filesystem can be used. Performance of this storage driver is poor, and is not generally recommended for production use. | +| Backend | Description | +| :-------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `containerd` (snapshotters) | The default for Docker Engine 29.0 and later. Uses containerd snapshotters for image storage. Supports multi-platform images and attestations. See [containerd image store](../containerd.md) for details. | +| `overlay2` | Classic storage driver. Most widely compatible across all currently supported Linux distributions, and requires no extra configuration. 
| +| `fuse-overlayfs` | Preferred only for running Rootless Docker on hosts that don't support rootless `overlay2`. Not needed since Linux kernel 5.11, as `overlay2` works in rootless mode. See [rootless mode documentation](/manuals/engine/security/rootless.md) for details. | +| `btrfs` and `zfs` | Allow for advanced options, such as creating snapshots, but require more maintenance and setup. Each relies on the backing filesystem being configured correctly. | +| `vfs` | Intended for testing purposes, and for situations where no copy-on-write filesystem can be used. Performance is poor, and not generally recommended for production use. | The Docker Engine has a prioritized list of which storage driver to use if no storage driver is explicitly configured, assuming that the storage driver meets the prerequisites, and automatically selects a compatible storage driver. You -can see the order in the [source code for Docker Engine {{% param "docker_ce_version" %}}](https://github.com/moby/moby/blob/v{{% param "docker_ce_version" %}}/daemon/graphdriver/driver_linux.go#L52-L53). +can see the order in the [source code for Docker Engine {{% param "docker_ce_version" %}}](https://github.com/moby/moby/blob/docker-v{{% param "docker_ce_version" %}}/daemon/graphdriver/driver_linux.go). { #storage-driver-order } @@ -56,32 +65,35 @@ the final decision. ## Supported storage drivers per Linux distribution > [!NOTE] -> > Modifying the storage driver by editing the daemon configuration file isn't -> supported on Docker Desktop. Only the default `overlay2` driver or the -> [containerd storage](/manuals/desktop/features/containerd.md) are supported. The -> following table is also not applicable for the Docker Engine in rootless -> mode. For the drivers available in rootless mode, see the [Rootless mode -> documentation](/manuals/engine/security/rootless.md). - -Your operating system and kernel may not support every storage driver. For -example, `btrfs` is only supported if your system uses `btrfs` as storage. In -general, the following configurations work on recent versions of the Linux +> supported on Docker Desktop. Docker Desktop uses the +> [containerd image store](/manuals/desktop/features/containerd.md) by default +> (version 4.34 and later for clean installs). The following table is also not +> applicable for the Docker Engine in rootless mode. For the drivers available +> in rootless mode, see the +> [Rootless mode documentation](/manuals/engine/security/rootless.md). + +This section applies to classic storage drivers only. If you're using the +containerd image store (the default for Docker Engine 29.0+), see the +[containerd image store documentation](../containerd.md) instead. + +Your operating system and kernel may not support every classic storage driver. +For example, `btrfs` is only supported if your system uses `btrfs` as storage. 
+In general, the following configurations work on recent versions of the Linux distribution: -| Linux distribution | Recommended storage drivers | Alternative drivers | -| :------------------- | :--------------------------- | :------------------- | -| Ubuntu | `overlay2` | `zfs`, `vfs` | -| Debian | `overlay2` | `vfs` | -| CentOS | `overlay2` | `zfs`, `vfs` | -| Fedora | `overlay2` | `zfs`, `vfs` | -| SLES 15 | `overlay2` | `vfs` | -| RHEL | `overlay2` | `vfs` | +| Linux distribution | Default classic driver | Alternative drivers | +| :------------------- | :---------------------- | :------------------- | +| Ubuntu | `overlay2` | `zfs`, `vfs` | +| Debian | `overlay2` | `vfs` | +| CentOS | `overlay2` | `zfs`, `vfs` | +| Fedora | `overlay2` | `zfs`, `vfs` | +| SLES 15 | `overlay2` | `vfs` | +| RHEL | `overlay2` | `vfs` | -When in doubt, the best all-around configuration is to use a modern Linux -distribution with a kernel that supports the `overlay2` storage driver, and to -use Docker volumes for write-heavy workloads instead of relying on writing data -to the container's writable layer. +For systems using classic storage drivers, `overlay2` provides broad +compatibility across Linux distributions. Use Docker volumes for write-heavy +workloads instead of relying on writing data to the container's writable layer. The `vfs` storage driver is usually not the best choice, and primarily intended for debugging purposes in situations where no other storage-driver is supported. @@ -108,13 +120,20 @@ With regard to Docker, the backing filesystem is the filesystem where `/var/lib/docker/` is located. Some storage drivers only work with specific backing filesystems. -| Storage driver | Supported backing filesystems | -| :--------------- | :---------------------------- | -| `overlay2` | `xfs` with ftype=1, `ext4` | -| `fuse-overlayfs` | any filesystem | -| `btrfs` | `btrfs` | -| `zfs` | `zfs` | -| `vfs` | any filesystem | +| Storage driver | Supported backing filesystems | +| :--------------- | :-----------------------------------------------------| +| `overlay2` | `xfs` with ftype=1, `ext4`, `btrfs`, (and more) | +| `fuse-overlayfs` | any filesystem | +| `btrfs` | `btrfs` | +| `zfs` | `zfs` | +| `vfs` | any filesystem | + +> [!NOTE] +> +> Most filesystems should work if they have the required features. +> Consult [OverlayFS](https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html) +> for more information. + ## Other considerations diff --git a/content/manuals/engine/storage/volumes.md b/content/manuals/engine/storage/volumes.md index a62ca6f0fac..3f5838d6703 100644 --- a/content/manuals/engine/storage/volumes.md +++ b/content/manuals/engine/storage/volumes.md @@ -625,7 +625,7 @@ $ docker volume create \ --opt type=cifs \ --opt device=//uxxxxx.your-server.de/backup \ --opt o=addr=uxxxxx.your-server.de,username=uxxxxxxx,password=*****,file_mode=0777,dir_mode=0777 \ - --name cif-volume + --name cifs-volume ``` The `addr` option is required if you specify a hostname instead of an IP. @@ -775,8 +775,8 @@ testing using your preferred tools. A Docker data volume persists after you delete a container. There are two types of volumes to consider: -- Named volumes have a specific source from outside the container, for example, `awesome:/bar`. -- Anonymous volumes have no specific source. Therefore, when the container is deleted, you can instruct the Docker Engine daemon to remove them. +- Named volumes have a specific name, for example, `awesome:/bar`, where `awesome` is the name. 
+- Anonymous volumes have no specific name. Therefore, when the container is deleted, you can instruct the Docker Engine daemon to remove them. ### Remove anonymous volumes @@ -784,6 +784,9 @@ To automatically remove anonymous volumes, use the `--rm` option. For example, this command creates an anonymous `/foo` volume. When you remove the container, the Docker Engine removes the `/foo` volume but not the `awesome` volume. +The `--rm` option works with both foreground and detached (`-d`) containers. +The anonymous volumes are cleaned up when the container exits. + ```console $ docker run --rm -v /foo -v awesome:/bar busybox top ``` diff --git a/content/manuals/engine/swarm/_index.md b/content/manuals/engine/swarm/_index.md index 0635ed1ce45..d7859f7a994 100644 --- a/content/manuals/engine/swarm/_index.md +++ b/content/manuals/engine/swarm/_index.md @@ -130,12 +130,12 @@ roll back to a previous version of the service. * Learn Swarm mode [key concepts](key-concepts.md). * Get started with the [Swarm mode tutorial](swarm-tutorial/_index.md). * Explore Swarm mode CLI commands - * [swarm init](/reference/cli/docker/swarm/init.md) - * [swarm join](/reference/cli/docker/swarm/join.md) - * [service create](/reference/cli/docker/service/create.md) - * [service inspect](/reference/cli/docker/service/inspect.md) - * [service ls](/reference/cli/docker/service/ls.md) - * [service rm](/reference/cli/docker/service/rm.md) - * [service scale](/reference/cli/docker/service/scale.md) - * [service ps](/reference/cli/docker/service/ps.md) - * [service update](/reference/cli/docker/service/update.md) + * [swarm init](/reference/cli/docker/swarm/init/) + * [swarm join](/reference/cli/docker/swarm/join/) + * [service create](/reference/cli/docker/service/create/) + * [service inspect](/reference/cli/docker/service/inspect/) + * [service ls](/reference/cli/docker/service/ls/) + * [service rm](/reference/cli/docker/service/rm/) + * [service scale](/reference/cli/docker/service/scale/) + * [service ps](/reference/cli/docker/service/ps/) + * [service update](/reference/cli/docker/service/update/) diff --git a/content/manuals/engine/swarm/admin_guide.md b/content/manuals/engine/swarm/admin_guide.md index d63579a8442..777812222a9 100644 --- a/content/manuals/engine/swarm/admin_guide.md +++ b/content/manuals/engine/swarm/admin_guide.md @@ -154,7 +154,7 @@ worker nodes that do not meet these requirements cannot run these tasks. You can monitor the health of manager nodes by querying the docker `nodes` API in JSON format through the `/nodes` HTTP endpoint. Refer to the -[nodes API documentation](/reference/api/engine/v1.25/#tag/Node) +[nodes API documentation](/reference/api/engine/version/v1.25/#tag/Node) for more information. From the command line, run `docker node inspect ` to query the nodes. @@ -221,7 +221,7 @@ the `docker node rm` command. If a node becomes unreachable, unresponsive, or compromised you can forcefully remove the node without shutting it down by passing the `--force` flag. For instance, if `node9` becomes compromised: -```none +```console $ docker node rm node9 Error response from daemon: rpc error: code = 9 desc = node node9 is not down and can't be removed @@ -338,7 +338,7 @@ If you lose the quorum of managers, you cannot administer the swarm. 
If you have lost the quorum and you attempt to perform any management operation on the swarm, an error occurs: -```none +```text Error response from daemon: rpc error: code = 4 desc = context deadline exceeded ``` @@ -395,5 +395,5 @@ down to the original scale. You can use `docker service ps` to assess the curren balance of your service across nodes. See also -[`docker service scale`](/reference/cli/docker/service/scale.md) and -[`docker service ps`](/reference/cli/docker/service/ps.md). +[`docker service scale`](/reference/cli/docker/service/scale/) and +[`docker service ps`](/reference/cli/docker/service/ps/). diff --git a/content/manuals/engine/swarm/configs.md b/content/manuals/engine/swarm/configs.md index 7d2d5acba88..76502e97ef5 100644 --- a/content/manuals/engine/swarm/configs.md +++ b/content/manuals/engine/swarm/configs.md @@ -113,10 +113,10 @@ those remaining after a `docker service update --config-rm`. Use these links to read about specific commands, or continue to the [example about using configs with a service](#advanced-example-use-configs-with-a-nginx-service). -- [`docker config create`](/reference/cli/docker/config/create.md) -- [`docker config inspect`](/reference/cli/docker/config/inspect.md) -- [`docker config ls`](/reference/cli/docker/config/ls.md) -- [`docker config rm`](/reference/cli/docker/config/rm.md) +- [`docker config create`](/reference/cli/docker/config/create/) +- [`docker config inspect`](/reference/cli/docker/config/inspect/) +- [`docker config ls`](/reference/cli/docker/config/ls/) +- [`docker config rm`](/reference/cli/docker/config/rm/) ## Examples @@ -133,7 +133,7 @@ Docker configs. The `docker stack` command supports defining configs in a Compose file. However, the `configs` key is not supported for `docker compose`. See -[the Compose file reference](/reference/compose-file/legacy-versions.md) for details. +[the Compose file reference](/reference/compose-file/configs.md) for details. ### Simple example: Get started with configs @@ -216,7 +216,7 @@ real-world example, continue to to the config. The container ID is different, because the `service update` command redeploys the service. - ```none + ```console $ docker container exec -it $(docker ps --filter name=redis -q) cat /my-config cat: can't open '/my-config': No such file or directory @@ -248,7 +248,7 @@ This example assumes that you have PowerShell installed. ``` - + 2. If you have not already done so, initialize or join the swarm. ```powershell @@ -373,7 +373,7 @@ generate the site key and certificate, name the files `site.key` and the following contents into it. This constrains the root CA to only sign leaf certificates and not intermediate CAs. - ```none + ```ini [root_ca] basicConstraints = critical,CA:TRUE,pathlen:1 keyUsage = critical, nonRepudiation, cRLSign, keyCertSign @@ -407,7 +407,7 @@ generate the site key and certificate, name the files `site.key` and certificate so that it can only be used to authenticate a server and can't be used to sign certificates. - ```none + ```ini [server] authorityKeyIdentifier=keyid,issuer basicConstraints = critical,CA:FALSE @@ -438,7 +438,7 @@ generate the site key and certificate, name the files `site.key` and In the current directory, create a new file called `site.conf` with the following contents: - ```none + ```nginx server { listen 443 ssl; server_name localhost; @@ -616,7 +616,7 @@ configuration file. 1. Edit the `site.conf` file locally. Add `index.php` to the `index` line, and save the file. 
- ```none + ```nginx server { listen 443 ssl; server_name localhost; diff --git a/content/manuals/engine/swarm/how-swarm-mode-works/nodes.md b/content/manuals/engine/swarm/how-swarm-mode-works/nodes.md index 10aae0ee09c..fc011718653 100644 --- a/content/manuals/engine/swarm/how-swarm-mode-works/nodes.md +++ b/content/manuals/engine/swarm/how-swarm-mode-works/nodes.md @@ -67,17 +67,17 @@ gracefully stops tasks on nodes in `Drain` mode and schedules the tasks on an `Active` node. The scheduler does not assign new tasks to nodes with `Drain` availability. -Refer to the [`docker node update`](/reference/cli/docker/node/update.md) +Refer to the [`docker node update`](/reference/cli/docker/node/update/) command line reference to see how to change node availability. ## Change roles You can promote a worker node to be a manager by running `docker node promote`. For example, you may want to promote a worker node when you -take a manager node offline for maintenance. See [node promote](/reference/cli/docker/node/promote.md). +take a manager node offline for maintenance. See [node promote](/reference/cli/docker/node/promote/). You can also demote a manager node to a worker node. See -[node demote](/reference/cli/docker/node/demote.md). +[node demote](/reference/cli/docker/node/demote/). ## Learn more diff --git a/content/manuals/engine/swarm/how-swarm-mode-works/pki.md b/content/manuals/engine/swarm/how-swarm-mode-works/pki.md index d0ba71f22a4..67dbb9cdd98 100644 --- a/content/manuals/engine/swarm/how-swarm-mode-works/pki.md +++ b/content/manuals/engine/swarm/how-swarm-mode-works/pki.md @@ -14,7 +14,7 @@ as a manager node. By default, the manager node generates a new root Certificate Authority (CA) along with a key pair, which are used to secure communications with other nodes that join the swarm. If you prefer, you can specify your own externally-generated root CA, using the `--external-ca` flag of the -[docker swarm init](/reference/cli/docker/swarm/init.md) command. +[docker swarm init](/reference/cli/docker/swarm/init/) command. The manager node also generates two tokens to use when you join additional nodes to the swarm: one worker token and one manager token. Each token @@ -36,7 +36,7 @@ communications using a minimum of TLS 1.2. The example below shows the information from a certificate from a worker node: -```none +```text Certificate: Data: Version: 3 (0x2) @@ -55,7 +55,7 @@ By default, each node in the swarm renews its certificate every three months. You can configure this interval by running the `docker swarm update --cert-expiry

} this.updateVisible(); } - }" x-cloak - @guide-filter.window="filters = $event.detail.filters; updateVisible(); $nextTick(() => { window.scrollTo({ top: 0 }) })"> -
-

Featured guides

-
- {{- $featured := where .Pages "Params.featured" true }} - {{- with $featured }} - {{- range . }} -
- - {{- $img := resources.Get (.Params.image | default "/images/thumbnail.webp") }} - {{- $img = $img.Process "resize 600x" }} - -

{{ .Title }}

-
-

{{ .Summary }}

-
- {{ template "guide-metadata" . }} + }" + x-cloak + @guide-filter.window="filters = $event.detail.filters; updateVisible(); $nextTick(() => { window.scrollTo({ top: 0 }) })" + > +
+

Featured guides

+
+ {{- $featured := where .Pages "Params.featured" true }} + {{- with $featured }} + {{- range . }} + -
+ {{- end }} {{- end }} - {{- end }} -
-
-
-

All guides

-
-
- {{ partialCached "icon" "filter_alt" "filter_alt" }} -

Filtered results: showing out of guides.

+
+
-
- {{- range $name, $taxonomy := site.Taxonomies.tags }} -
{{ template "termchip" $taxonomy.Page.LinkTitle }}
- {{- end }} - {{- range $name, $taxonomy := site.Taxonomies.languages }} -
- - {{ .Page.LinkTitle }} +

+ All guides +

+
+
+ {{ partialCached "icon" "filter_alt" "filter_alt" }} +

+ Filtered results: showing + out of + guides. +

+
+
+ {{- range $name, $data := hugo.Data.tags }} +
+ {{ template "termchip" $data.title }} +
+ {{- end }} + {{- range $name, $data := hugo.Data.languages }} +
+ + {{ $data.title }} +
+ {{- end }}
- {{- end }}
-
-
- {{- range .Pages }} -
+ {{- range .Pages }} +
-
- {{ .Title }} - {{ template "guide-metadata" . }} + class="border-divider-light dark:border-divider-dark flex flex-col justify-between border-b p-4 hover:bg-gray-100 hover:dark:bg-gray-800" + > +
+ {{ .Title }} + {{ template "guide-metadata" . }} +
-
- {{- end }} + {{- end }} +
-
- -
+ +
{{ end }} {{- define "guide-metadata" }} -
-
- {{- $langs := .GetTerms "languages" }} - {{ partial "languages" $langs }} - {{- $tags := .GetTerms "tags" }} - {{- range $tags }} - {{ template "termchip" .Page.LinkTitle }} +
+
+ {{- with .Params.languages }} + {{ partial "guide-languages.html" . }} + {{- end }} + {{- with .Params.tags }} + {{ partial "guide-tags.html" . }} + {{- end }} +
+ {{- with .Params.time }} +
+ {{ partialCached "icon" "schedule" "schedule" }} + {{ . }} +
{{- end }}
- {{- with .Params.time }} -
- {{ partialCached "icon" "schedule" "schedule" }} - {{ . }} -
- {{- end }} -
{{- end }} {{- define "termchip" }} - + {{ partialCached "icon" "tag" "tag" }} diff --git a/layouts/guides/list.html b/layouts/guides/list.html deleted file mode 100644 index cc1fd072322..00000000000 --- a/layouts/guides/list.html +++ /dev/null @@ -1,25 +0,0 @@ -{{ define "left" }} - {{ partial "sidebar/mainnav.html" . }} - {{ partial "sidebar/guides.html" . }} -{{ end }} - -{{ define "main" }} -
- {{ partial "breadcrumbs.html" . }} -

{{ .Title }}

-
- {{ partialCached "pagemeta.html" . . }} -
-
- {{ .Content }} - {{ partial "heading.html" (dict "text" "Modules" "level" 2) }} -
    - {{- range $i, $e := .Pages }} -
  1. - {{ .LinkTitle }} -

    {{ plainify .Description }}

    -
  2. - {{- end }} -
-
-{{ end }} diff --git a/layouts/guides/single.html b/layouts/guides/single.html deleted file mode 100644 index f72668a9eb9..00000000000 --- a/layouts/guides/single.html +++ /dev/null @@ -1,20 +0,0 @@ -{{ define "left" }} - {{ partial "sidebar/mainnav.html" . }} - {{ partial "sidebar/guides.html" . }} -{{ end }} - -{{ define "main" }} - {{ partial "content-default.html" . }} -
- {{ if (.Store.Get "multipage") }} - {{- with .PrevInSection }} - - {{- end }} - {{- end }} -
-{{ end }} diff --git a/layouts/home.html b/layouts/home.html new file mode 100644 index 00000000000..24e504ee4b7 --- /dev/null +++ b/layouts/home.html @@ -0,0 +1,159 @@ + + + + {{ partial "head.html" . }} + + + + {{ partial "header.html" . }} + {{ partial "gordon-chat.html" . }} +
+ +
+ +
+
+

+ How can we help? +

+ +
+
+
+ + {{ partialCached "icon" "icons/gordon.svg" "icons/gordon.svg" }} + + +
+ + +
+
+ + +
+ {{ range slice + "How do I get started with Docker?" + "Can I run my AI agent in a sandbox?" + "What is a container?" + "What are Docker Hardened Images?" + "Why should I use Docker Compose?" + }} + + {{ end }} +
+
+
+ + +
+
+
+ {{ range slice + (dict "url" "/get-started/" "icon" "rocket" "title" "Get started" "description" "Learn Docker basics.") + (dict "url" "/guides/" "icon" "menu_book" "title" "Guides" "description" "Optimize your development workflows with Docker.") + (dict "url" "/manuals/" "icon" "description" "title" "Manuals" "description" "Install, set up, configure, and use Docker products.") + (dict "url" "/reference/" "icon" "terminal" "title" "Reference" "description" "Browse the CLI and API documentation.") + }} + +
+
+ + {{ partialCached "icon" .icon .icon }} + +
+
+

+ {{ .title }} +

+

+ {{ .description }} +

+
+
+
+ {{ end }} +
+
+
+ + +
+
+

+ Featured topics +

+
+ {{ range slice + (dict "url" "/dhi/" "title" "Docker Hardened Images") + (dict "url" "/ai/sandboxes/get-started/" "title" "Get started with Docker Sandboxes") + (dict "url" "/desktop/" "title" "Docker Desktop overview") + (dict "url" "/engine/install/" "title" "Install Docker Engine") + (dict "url" "/reference/dockerfile/" "title" "Dockerfile reference") + (dict "url" "/build/" "title" "Docker Build overview") + }} + + {{ .title }} + + {{ partialCached "icon" "arrow_forward" "arrow_forward" }} + + + {{ end }} +
+
+
+
+
{{ partialCached "footer.html" . }}
+ + diff --git a/layouts/home.llms.txt b/layouts/home.llms.txt new file mode 100644 index 00000000000..43d00260c03 --- /dev/null +++ b/layouts/home.llms.txt @@ -0,0 +1,81 @@ +{{- $home := site.Home -}} +{{- $manuals := site.GetPage "/manuals" -}} +{{- $getStarted := site.GetPage "/get-started" -}} +{{- $reference := site.GetPage "/reference" -}} + +# Docker Documentation + +{{ with $home.Description -}} +{{ chomp (replace . "\n" " ") }} +{{- else -}} +Docker Documentation is the official documentation site for Docker products, +including Docker Desktop, Docker Engine, Docker Build, Docker Compose, Docker +Hub, Docker Scout, and Docker AI features. +{{- end }} + +Use this file as a generated discovery guide for the docs site. For the complete +page index, use [llms-full.txt](https://docs.docker.com/llms-full.txt). + +> MCP endpoint for structured agent access: https://mcp-docs.docker.com/mcp +> Bulk text corpus for offline indexing: https://docs.docker.com/llms-full.txt + +## Primary sections + +{{- range sort (where site.Sections "Title" "!=" "") "Weight" }} +{{- if not (in (slice "includes" "tags") .Section) }} +- [{{ .LinkTitle | default .Title }}]({{ .Permalink }}): {{ with .Description }}{{ chomp (replace . "\n" " ") }}{{ else }}{{ .Title }} documentation.{{ end }} + Markdown: {{ partial "utils/markdown-url.html" . }} +{{- end }} +{{- end }} + +{{ with $getStarted }} +## Starter paths + +{{- range index .Params "get-started" }} +- [{{ .title }}]({{ .link | absURL }}): {{ .description }} +{{- end }} +{{- range index .Params "get-started2" }} +- [{{ .title }}]({{ .link | absURL }}): {{ .description }} +{{- end }} +{{- end }} + +{{ with $manuals }} +## Product documentation + +{{- range .Params.sidebar.groups }} +{{- $group := . }} +{{- $items := index $manuals.Params ($group | anchorize) }} +{{- with $items }} + +### {{ $group }} +{{- range . }} +- [{{ .title }}]({{ .link | absURL }}): {{ .description }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{ with $reference }} +## Reference entry points + +### File formats +{{- range .Params.grid_files }} +- [{{ .title }}]({{ .link | absURL }}): {{ .description }} +{{- end }} + +### Command-line interfaces +{{- range .Params.grid_clis }} +- [{{ .title }}]({{ .link | absURL }}): {{ .description }} +{{- end }} + +### APIs +{{- range .Params.grid_apis }} +- [{{ .title }}]({{ .link | absURL }}): {{ .description }} +{{- end }} +{{- end }} + +## Retrieval guidance + +- Prefer section landing pages above for navigation and scope selection. +- Use [llms-full.txt](https://docs.docker.com/llms-full.txt) when you need the complete documentation corpus. +- Use page-level markdown routes such as `https://docs.docker.com/engine/install.md` for clean per-page retrieval. diff --git a/layouts/home.llmsfull.txt b/layouts/home.llmsfull.txt new file mode 100644 index 00000000000..eaa17a21b42 --- /dev/null +++ b/layouts/home.llmsfull.txt @@ -0,0 +1,16 @@ +{{- $pages := where site.RegularPages "Params.sitemap" "!=" false -}} +{{- $sorted := sort $pages "RelPermalink" -}} + +# Docker Documentation full text + +> Source index: https://docs.docker.com/llms.txt +> This file contains page metadata and stable markdown URLs for bulk ingestion. +{{- range $sorted }} + +## {{ .Title }} +URL: {{ .Permalink }} +Markdown: {{ partial "utils/markdown-url.html" . }} +{{- with .Description }} +Description: {{ chomp (replace . 
"\n" " ") }} +{{- end }} +{{- end }} diff --git a/layouts/home.markdown.md b/layouts/home.markdown.md new file mode 100644 index 00000000000..239ce8eb50a --- /dev/null +++ b/layouts/home.markdown.md @@ -0,0 +1,3 @@ +# Docker Documentation + +{{ .RenderShortcodes }} diff --git a/layouts/home.metadata.json b/layouts/home.metadata.json new file mode 100644 index 00000000000..caee6bc70c6 --- /dev/null +++ b/layouts/home.metadata.json @@ -0,0 +1,15 @@ +[ +{{- range where site.Pages "Params.sitemap" "!=" false -}} + {{- $title := .LinkTitle -}} + {{- $desc := partialCached "utils/description.html" . . -}} + {{- $kwd := partialCached "utils/keywords.html" . . -}} + {{- $tags := slice -}} + {{- range (.Params.tags | default slice) -}} + {{- $tagData := index hugo.Data.tags . -}} + {{- with $tagData -}} + {{ $tags = $tags | append .title }} + {{- end -}} + {{- end -}} + {{- jsonify (dict "url" .Permalink "title" $title "description" $desc "keywords" $kwd "tags" $tags) -}}, +{{- end -}} +{}] diff --git a/layouts/home.redirects.json b/layouts/home.redirects.json new file mode 100644 index 00000000000..c9219014955 --- /dev/null +++ b/layouts/home.redirects.json @@ -0,0 +1,40 @@ +{{- /* + + This template generates the redirects.json file used to generate 301 + redirects in production. It takes all the redirects defined in + data/redirects.yml, as well as all the aliases defined in front matter, and + outputs a simple key-value JSON file: + + { + "": "", + ... + } + + e.g. + + { + "/engine/reference/builder/": "/reference/dockerfile/", + ... + } + + */ +-}} + +{{- $redirects := newScratch }} +{{- range hugo.Sites -}} + {{- range .Pages -}} + {{- if .Params.aliases -}} + {{- $target := .RelPermalink -}} + {{- range .Params.aliases -}} + {{ $redirects.SetInMap "paths" . $target }} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- range $target, $aliases := hugo.Data.redirects -}} +{{- range $aliases }} +{{- $redirects.SetInMap "paths" . $target -}} +{{- end -}} +{{- end -}} +{{- $opts := dict "noHTMLEscape" true }} +{{- $redirects.Get "paths" | jsonify $opts }} diff --git a/layouts/home.robots.txt b/layouts/home.robots.txt new file mode 100644 index 00000000000..7c2c0797547 --- /dev/null +++ b/layouts/home.robots.txt @@ -0,0 +1,19 @@ +{{- /* + For Netlify deployments, we disallow all routes to prevent search + engines from indexing our preview sites. + */ +-}} + +{{- if hugo.IsProduction -}} +User-agent: * +Disallow: /unassociated-machines/ +Content-Signal: ai-train=yes, search=yes, ai-input=yes + + +Sitemap: {{ "sitemap.xml" | absURL }} +{{- else -}} +# Disable all indexing on staging websites and Netlify previews to prevent +# them showing up in search results. +User-agent: * +Disallow: / +{{- end }} diff --git a/layouts/index.html b/layouts/index.html deleted file mode 100644 index c157c1943be..00000000000 --- a/layouts/index.html +++ /dev/null @@ -1,262 +0,0 @@ - - - - - {{ partial "head.html" . }} - - - -
- - - - -
- {{ partial "header.html" . }} -
-
-
-
-
- - {{ partial "icon" "play_circle" }} -

Get Docker

-
-

- Learn how to install Docker for Mac, Windows, or Linux and explore - our developer tools. -

- - {{ partial "icon" "download"}} - Get Docker - -
-
- Low-fi desktop app - -
-
- - - -
-
-
-
-

Gen AI catalog {{ partial - "components/badge.html" (dict "color" "blue" "content" "New") - }}

-

- Integrate AI solutions into your apps with minimal effort -

-
- -
-
- Low-fi desktop app - -
-
-
-
-
-
-

- Docker Model Runner - {{ partial "components/badge.html" (dict "color" "blue" "content" "Beta") }} -

-

- Run, test, and serve AI models locally in seconds — no setup, no hassle. -

-

- Whether you’re experimenting with the latest LLMs or deploying to production, - Docker Model Runner brings the performance and control you need, without the friction. - -

-
- -
-
-
-
- -
-

Browse by section

-
- {{ range .Params.grid }} -
-
-
- {{ partial "icon" .icon }} -
-
{{ .title }}
-
-
-
-

{{ .description | markdownify }}

- -
-
-
- {{ end }} -
-
-
-

Browse by tag

-
- {{ $tags := slice }} - {{- range site.Taxonomies.tags }} - {{ $tags = $tags | append .Page }} - {{ end }} - {{ partial "tags.html" $tags }} -
-
-
- -
-
-
-

Community resources

-

- Find fellow Docker enthusiasts, engage in insightful discussions, - share knowledge, and collaborate on projects. Our communities - offer a rich online experience for developers to create valuable - connections that challenge and inspire! -

- -
-
- -
-
-
{{ partialCached "footer.html" . }}
- - - diff --git a/layouts/index.metadata.json b/layouts/index.metadata.json deleted file mode 100644 index acfaf21f2b8..00000000000 --- a/layouts/index.metadata.json +++ /dev/null @@ -1,12 +0,0 @@ -[ -{{- range where site.Pages "Params.sitemap" "!=" false -}} - {{- $title := .LinkTitle -}} - {{- $desc := partialCached "utils/description.html" . . -}} - {{- $kwd := partialCached "utils/keywords.html" . . -}} - {{- $tags := slice -}} - {{- range (.GetTerms "tags") -}} - {{ $tags = $tags | append .LinkTitle }} - {{- end -}} - {{- jsonify (dict "url" .Permalink "title" $title "description" $desc "keywords" $kwd "tags" $tags) -}}, -{{- end -}} -{}] diff --git a/layouts/index.redirects.json b/layouts/index.redirects.json deleted file mode 100644 index 6229dfc8d18..00000000000 --- a/layouts/index.redirects.json +++ /dev/null @@ -1,38 +0,0 @@ -{{- /* - - This template generates the redirects.json file used to generate 301 - redirects in production. It takes all the redirects defined in - data/redirects.yml, as well as all the aliases defined in front matter, and - outputs a simple key-value JSON file: - - { - "": "", - ... - } - - e.g. - - { - "/engine/reference/builder/": "/reference/dockerfile/", - ... - } - - */ --}} - -{{- $redirects := newScratch }} -{{- range $i, $e := site.AllPages -}} - {{- if .Params.aliases -}} - {{- $target := .RelPermalink -}} - {{- range .Params.aliases -}} - {{ $redirects.SetInMap "paths" . $target }} - {{- end -}} - {{- end -}} -{{- end -}} -{{- range $target, $aliases := site.Data.redirects -}} -{{- range $aliases }} -{{- $redirects.SetInMap "paths" . $target -}} -{{- end -}} -{{- end -}} -{{- $opts := dict "noHTMLEscape" true }} -{{- $redirects.Get "paths" | jsonify $opts }} diff --git a/layouts/index.robots.txt b/layouts/index.robots.txt deleted file mode 100644 index 3e9a658fdf9..00000000000 --- a/layouts/index.robots.txt +++ /dev/null @@ -1,17 +0,0 @@ -{{- /* - For Netlify deployments, we disallow all routes to prevent search - engines from indexing our preview sites. - */ --}} - -{{- if hugo.IsProduction -}} -User-agent: * - - -Sitemap: {{ "sitemap.xml" | absURL }} -{{- else -}} -# Disable all indexing on staging websites and Netlify previews to prevent -# them showing up in search results. -User-agent: * -Disallow: / -{{- end }} diff --git a/layouts/list.html b/layouts/list.html new file mode 100644 index 00000000000..834974e186e --- /dev/null +++ b/layouts/list.html @@ -0,0 +1,5 @@ +{{ define "article" }} +
+ {{ partial "content-default.html" . }} +
+{{ end }} diff --git a/layouts/list.markdown.md b/layouts/list.markdown.md new file mode 100644 index 00000000000..54f30713767 --- /dev/null +++ b/layouts/list.markdown.md @@ -0,0 +1,3 @@ +# {{ .Title }} + +{{ .RenderShortcodes }} diff --git a/layouts/partials/aside.html b/layouts/partials/aside.html deleted file mode 100644 index d72fa42a853..00000000000 --- a/layouts/partials/aside.html +++ /dev/null @@ -1,15 +0,0 @@ - \ No newline at end of file diff --git a/layouts/partials/breadcrumbs.html b/layouts/partials/breadcrumbs.html deleted file mode 100644 index 17db7a97f0e..00000000000 --- a/layouts/partials/breadcrumbs.html +++ /dev/null @@ -1,12 +0,0 @@ - diff --git a/layouts/partials/components/accordion.html b/layouts/partials/components/accordion.html deleted file mode 100644 index 93fc14fb302..00000000000 --- a/layouts/partials/components/accordion.html +++ /dev/null @@ -1,17 +0,0 @@ -
- -
- {{ markdownify .body }} -
-
diff --git a/layouts/partials/components/badge.html b/layouts/partials/components/badge.html deleted file mode 100644 index f21b11e9652..00000000000 --- a/layouts/partials/components/badge.html +++ /dev/null @@ -1,17 +0,0 @@ -{{- $colors := (dict - "amber" "bg-amber-light dark:bg-amber-dark" - "blue" "bg-blue-light dark:bg-blue-dark" - "green" "bg-green-light dark:bg-green-dark" - "red" "bg-red-light dark:bg-red-dark" - "violet" "bg-violet-light dark:bg-violet-dark" - ) --}} - -{{- if not (isset $colors .color) -}} -{{- errorf "[badge] wrong color name: '%s' - supported values: amber, blue, green, red, violet" .color -}}h -{{- end -}} - -{{ .content }} diff --git a/layouts/partials/components/card.html b/layouts/partials/components/card.html deleted file mode 100644 index 0d1b6bead8d..00000000000 --- a/layouts/partials/components/card.html +++ /dev/null @@ -1,52 +0,0 @@ -{{ if (and .image .icon) }} - {{ errorf "card: don't use both image and icon: %s" . }} -{{ end }} - diff --git a/layouts/partials/components/guide-summary.html b/layouts/partials/components/guide-summary.html deleted file mode 100644 index 0a9c4f4fea3..00000000000 --- a/layouts/partials/components/guide-summary.html +++ /dev/null @@ -1,16 +0,0 @@ -
-
-
- Skill level - {{ .Params.skill }} -
-
- Time to complete - {{ .Params.time }} -
-
- Prerequisites - {{ .Params.prereq }} -
-
-
diff --git a/layouts/partials/components/support-button.html b/layouts/partials/components/support-button.html deleted file mode 100644 index 5680f6f6b0a..00000000000 --- a/layouts/partials/components/support-button.html +++ /dev/null @@ -1,7 +0,0 @@ - - Contact support - \ No newline at end of file diff --git a/layouts/partials/content-default.html b/layouts/partials/content-default.html deleted file mode 100644 index 1c96e70c47f..00000000000 --- a/layouts/partials/content-default.html +++ /dev/null @@ -1,20 +0,0 @@ -
-
- {{ partial "breadcrumbs.html" . }} -

- {{ .Title }} -

- -
- {{ partialCached "pagemeta.html" . . }} -
-
- {{ .Content }} -
- - -
diff --git a/layouts/partials/favicon.html b/layouts/partials/favicon.html deleted file mode 100644 index 0893cdc33f2..00000000000 --- a/layouts/partials/favicon.html +++ /dev/null @@ -1,17 +0,0 @@ -{{- $favicon := resources.Get "favicons/docs@2x.ico" }} - - - diff --git a/layouts/partials/footer.html b/layouts/partials/footer.html deleted file mode 100644 index aae73710996..00000000000 --- a/layouts/partials/footer.html +++ /dev/null @@ -1,99 +0,0 @@ -
- {{ partialCached "components/support-button.html" . }} -
-
-
-
- Product offerings - Pricing - About us - {{- with .GetPage "/contribute" }} - {{ .LinkTitle }} - {{- end }} - Read llms.txt -
-
- -
- -
- Theme: - -
-
-
-
diff --git a/layouts/partials/github-links.html b/layouts/partials/github-links.html deleted file mode 100644 index 1f7e518b739..00000000000 --- a/layouts/partials/github-links.html +++ /dev/null @@ -1,28 +0,0 @@ -{{- /* - Adds links for editing the page or requesting changes: - - "Edit this page": Only in production, skips files from `_vendor/` (upstream repositories). - - "Request changes": Links to a pre-filled issue form. -*/ -}} -{{ if hugo.IsProduction }} -{{ with .File }} -{{ if not (in .Filename "/_vendor/") }} -

- {{ partialCached "icon" "edit" "edit" }} - {{- T "editPage" -}} - - {{ partialCached "icon" "open_in_new" "open_in_new" }} - -

-{{ end }} -{{ end }} -

- {{ partialCached "icon" "check" "check" }} - {{- T "requestChanges" -}} - - {{ partialCached "icon" "open_in_new" "open_in_new" }} - - -

-{{ end }} diff --git a/layouts/partials/guides-stepper.html b/layouts/partials/guides-stepper.html deleted file mode 100644 index 2dcb364658c..00000000000 --- a/layouts/partials/guides-stepper.html +++ /dev/null @@ -1,58 +0,0 @@ -{{/*- Multi-page guide: render a progress bar -*/}} -
- {{ $totalPages := len .CurrentSection.Pages }} - {{/*- initialize the page store - - $stepper_seen controls the color of the item in the stepper - - green: "completed" - index is lower than current page - blue: current page - gray: next in section - for the section page, they're all gray - - default to true if kind = section - (make all entries gray) - - -*/}} - {{ page.Store.Set "stepper_seen" .IsSection }} - {{/*- Loop over the pages in this guide -*/}} - {{ range $i, $e := .CurrentSection.Pages }} - {{ $isLast := eq (add $i 1) $totalPages }} -
-
- {{/*- Render the page's index digit (1,2,3,4 etc) -*/}} - - {{ add $i 1 }} - - {{/*- Render the vertical border -*/}} - {{ if not $isLast }} -
- {{ end }} -
- {{/*- Render the page's title -*/}} - -
- {{ end }} -
diff --git a/layouts/partials/header.html b/layouts/partials/header.html deleted file mode 100644 index dbe0283041d..00000000000 --- a/layouts/partials/header.html +++ /dev/null @@ -1,45 +0,0 @@ -
-
-
- {{- if not .IsHome }} - - {{- end }} - - -
-
- {{ partialCached "search-bar.html" "-" }} - -
-
-
diff --git a/layouts/partials/heading.html b/layouts/partials/heading.html deleted file mode 100644 index 9df46b60c68..00000000000 --- a/layouts/partials/heading.html +++ /dev/null @@ -1,6 +0,0 @@ -{{ $id := .id | default (anchorize (plainify .text)) }} - - - {{ .text }} - - diff --git a/layouts/partials/icon.html b/layouts/partials/icon.html deleted file mode 100644 index b09d1634e74..00000000000 --- a/layouts/partials/icon.html +++ /dev/null @@ -1,8 +0,0 @@ -{{- $svg := resources.Get (fmt.Printf "icons/%s-fill.svg" .) }} -{{- if not $svg }} - {{- errorf "Failed to get icon: %v\n\n" . }} -{{ end }} -{{- if not $svg.Content }} - {{- errorf "Failed to get icon: %v\n\n" . }} -{{- end }} -{{- safe.HTML $svg.Content -}} diff --git a/layouts/partials/languages.html b/layouts/partials/languages.html deleted file mode 100644 index 8f3f5bef303..00000000000 --- a/layouts/partials/languages.html +++ /dev/null @@ -1,21 +0,0 @@ -{{- /* - List of languages (taxonomy) chips with images - Context: page.Pages - */ --}} -{{- range . -}} - {{- if eq .File nil }} - {{- errorf "[languages] Undefined language: '%s' in %s" (urlize (strings.ToLower .Title)) page.File.Filename }} - {{- end }} - {{- if not .Page.Params.icon }} - {{- errorf "[languages] language is missing an icon: '%s' in %s" (urlize (strings.ToLower .Title)) page.File.Filename }} - {{- end }} - - - {{ .Page.LinkTitle }} - -{{- end -}} - diff --git a/layouts/partials/md-dropdown.html b/layouts/partials/md-dropdown.html deleted file mode 100644 index b0a512f9fb2..00000000000 --- a/layouts/partials/md-dropdown.html +++ /dev/null @@ -1,119 +0,0 @@ -
- - Page options - - {{ partialCached "icon" "arrow_drop_down" "arrow_drop_down" }} - - - - - - -
- - - - - -
-
- - \ No newline at end of file diff --git a/layouts/partials/pagemeta.html b/layouts/partials/pagemeta.html deleted file mode 100644 index 364ac2102c1..00000000000 --- a/layouts/partials/pagemeta.html +++ /dev/null @@ -1,37 +0,0 @@ -{{- /* - Renders a table of contents (ToC) for the page. - - Uses `.Fragments.Headings` to generate a nested ToC if headings exist and `notoc` is not set to `true`. - - Limits heading levels to a min and max range (`$min` and `$max`). - - Wraps the ToC in a `data-pagefind-ignore` container to exclude it from search indexing. - - Includes a recursive template (`walkHeadingFragments`) to handle nested headings. -*/ -}} -{{- $toc := false }} -{{- with .Fragments }} - {{- $toc = and (ne page.Params.notoc true) .Headings }} -{{- end }} -{{- with $toc }} -
-
{{ T "tableOfContents" }}
- -
-{{- end }} - -{{- define "walkHeadingFragments" }} - {{- $min := default 2 page.Params.toc_min }} - {{- $max := default 3 page.Params.toc_max }} -
    - {{- range . }} - {{- if and (ge .Level $min) (le .Level $max) }} -
  • - {{ markdownify .Title }} -
  • - {{- end }} - {{- with .Headings }} - {{ template "walkHeadingFragments" . }} - {{- end }} - {{- end }} -
-{{- end }} diff --git a/layouts/partials/pagination.html b/layouts/partials/pagination.html deleted file mode 100644 index de276f98ccc..00000000000 --- a/layouts/partials/pagination.html +++ /dev/null @@ -1,70 +0,0 @@ -{{- if gt .Paginator.TotalPages 1 }} - {{ $selectable := "cursor-pointer" }} - {{ $active := "hover:text-black dark:hover:text-white underline underline-offset-8" }} - {{ $disabled := "cursor-not-allowed text-gray-light dark:text-gray-dark" }} - -{{- end }} - diff --git a/layouts/partials/search-bar.html b/layouts/partials/search-bar.html deleted file mode 100644 index 6595e32d3ff..00000000000 --- a/layouts/partials/search-bar.html +++ /dev/null @@ -1,122 +0,0 @@ - - - {{ partialCached "icon" "search" "search" }} - - - diff --git a/layouts/partials/sidebar/guides.html b/layouts/partials/sidebar/guides.html deleted file mode 100644 index 6a51b3474d2..00000000000 --- a/layouts/partials/sidebar/guides.html +++ /dev/null @@ -1,52 +0,0 @@ -{{- /* - Renders a sidebar for pages in the `/guides` section. - - Detects if the current page is part of a multipage guide (`.Store.Set "multipage"`). - - Displays the section's title, summary, languages, tags, and estimated time (`Params.time`). - - Includes a stepper navigation (`guides-stepper.html`) for multipage guides. - - Optionally lists resource links from `Params.resource_links`. - - Provides a link back to the main `/guides/` index. -*/ -}} -
- {{- $root := . }} - {{- .Store.Set "multipage" false }} - {{- if ne .CurrentSection .FirstSection }} - {{- $root = .CurrentSection }} - {{- .Store.Set "multipage" true }} - {{- end }} - -
{{ $root.Summary }}
-
-
- {{- with ($root.GetTerms "languages") }} - {{ partial "languages.html" . }} - {{- end }} - {{- with ($root.GetTerms "tags") }} - {{ partial "tags.html" . }} - {{- end }} -
- {{- with ($root.Params.time) }} -
- {{ partialCached "icon" "schedule" "schedule" }} - {{ . }} -
- {{- end -}} -
- {{- if (.Store.Get "multipage") }} - {{- partial "guides-stepper.html" . }} - {{- end }} - {{- with $root.Params.resource_links }} -
-

Resources:

- -
- {{- end }} - « Back to all guides -
diff --git a/layouts/partials/sidebar/mainnav.html b/layouts/partials/sidebar/mainnav.html deleted file mode 100644 index 7055a6f79f6..00000000000 --- a/layouts/partials/sidebar/mainnav.html +++ /dev/null @@ -1,54 +0,0 @@ -{{- /* - Complements the section-specific sidebar navigation. - - Renders the main navigation for site sections, linking to the current or ancestor section/page. - - Uses the `site.Menus.main` configuration to determine primary navigation structure. - - Toggles visibility of nested menu items for the main sections. -*/ -}} - -
-
- - {{- $curr := .FirstSection }} - {{- if eq $curr site.Home }} - {{- $curr = . }} - {{- end }} - {{- range site.Menus.main }} - {{- if or (.Page.IsAncestor page) (eq .Page page) }} - {{- $curr = .Page }} - {{- end }} - {{- end }} - - {{- with $curr.Params.icon }} - - {{- partialCached "icon.html" . . -}} - - {{- end }} - {{- $curr.LinkTitle -}} - - -
- -
-
diff --git a/layouts/partials/sidebar/sections.html b/layouts/partials/sidebar/sections.html deleted file mode 100644 index 25c84cbf3b7..00000000000 --- a/layouts/partials/sidebar/sections.html +++ /dev/null @@ -1,107 +0,0 @@ -{{- /* - This template recursively renders the sidebar navigation, grouping pages by `Params.sidebar.groups`. - Highlights: - - Supports hierarchical navigation with collapsible sections (`renderList` template). - - Dynamically applies current page highlighting and expanded states. - - Handles external links via `Params.sidebar.goto` in `renderSingle`. - - Requires `Params.sitemap` and `Params.sidebar` for filtering and behavior. -*/ -}} - - - -{{ define "renderChildren" }} - {{- $pages := where .Pages "Params.sitemap" "ne" "false" }} - {{- if .Params.sidebar.reverse }} - {{ $pages = .Pages.Reverse }} - {{- end }} - {{- $ungrouped := where $pages "Params.sidebar.group" "==" nil }} - {{- range $ungrouped }} - {{- if .IsSection }} - {{- template "renderList" . }} - {{- else }} - {{- template "renderSingle" . }} - {{- end }} - {{- end }} - {{- range .Params.sidebar.groups }} -
  • {{ . }}
  • - {{- range where $pages "Params.sidebar.group" . }} - {{- if .IsSection }} - {{- template "renderList" . }} - {{- else }} - {{- template "renderSingle" . }} - {{- end }} - {{- end }} - {{- end }} -{{ end }} - -{{/* Recursive template for sidebar items */}} -{{ define "renderList" }} - {{ $isCurrent := eq page . }} - {{ $expanded := or $isCurrent (page.IsDescendant .) }} -
  • -
    -
    - {{- if .Permalink }} - {{/* If the link is not empty, use it */}} - - {{ template "renderTitle" . }} - - {{- else }} - {{/* Otherwise, just expand the section */}} - - {{- end }} -
    - -
    -
      - {{ template "renderChildren" . }} -
    -
  • -{{ end }} - -{{ define "renderSingle" }} - {{- if .Params.sidebar.goto }} -
  • - - {{ template "renderTitle" . }} - -
  • - {{- else }} - {{ $isCurrent := eq page . }} -
  • - - {{ template "renderTitle" . }} - -
  • - {{- end }} -{{ end }} - -{{ define "renderTitle" }} - {{ .LinkTitle }} - {{- with .Params.sidebar.badge }} - {{- partial "components/badge.html" (dict "color" .color "content" .text) }} - {{- end }} -{{ end }} \ No newline at end of file diff --git a/layouts/partials/sidebar/tags.html b/layouts/partials/sidebar/tags.html deleted file mode 100644 index fd1e70d0ce8..00000000000 --- a/layouts/partials/sidebar/tags.html +++ /dev/null @@ -1,17 +0,0 @@ -{{- /* - Renders a flat list of tags for the "tags" taxonomy. - - Unlike `sections.html`, this template is based on taxonomy terms, not section pages. - - Highlights the current tag page and links to all tag pages. -*/ -}} - diff --git a/layouts/partials/tags.html b/layouts/partials/tags.html deleted file mode 100644 index 63eece39393..00000000000 --- a/layouts/partials/tags.html +++ /dev/null @@ -1,20 +0,0 @@ -{{- /* - List of tag "chips" as links - Context: page.Pages - */ --}} -{{- range . -}} - {{- if eq .File nil }} - {{- errorf "[tags] Undefined tag: '%s' in %s" (urlize (strings.ToLower .Title)) page.File.Filename }} - {{- end }} - - - {{ partialCached "icon" "tag" "tag" }} - - {{ .LinkTitle }} - -{{- end -}} diff --git a/layouts/partials/tooltip.html b/layouts/partials/tooltip.html deleted file mode 100644 index 5c4cc30be6e..00000000000 --- a/layouts/partials/tooltip.html +++ /dev/null @@ -1,15 +0,0 @@ -{{- /* - Renders a tooltip component using Floating UI for positioning. - See script at `assets/js/src/tooltip.js` for functionality. -*/ -}} -
    -
    - {{ partialCached "icon" "help" "help" }} -
    - -
    diff --git a/layouts/partials/utils/css.html b/layouts/partials/utils/css.html deleted file mode 100644 index 5278447a177..00000000000 --- a/layouts/partials/utils/css.html +++ /dev/null @@ -1,15 +0,0 @@ -{{- /* - Processes and links the main CSS file (`assets/css/styles.css`). - - Applies PostCSS, minification, fingerprinting, and post-processing in production. - - Adds inline CSS to hide injected images in production builds. -*/ -}} -{{ $styles := resources.Get "css/styles.css" }} -{{ $styles = $styles | css.PostCSS }} -{{ if hugo.IsProduction }} - {{ $styles = $styles | minify | fingerprint | resources.PostProcess }} - -{{ end }} - diff --git a/layouts/redirect/single.html b/layouts/redirect/single.html new file mode 100644 index 00000000000..03bf721b353 --- /dev/null +++ b/layouts/redirect/single.html @@ -0,0 +1 @@ +{{- template "alias.html" (dict "Permalink" .Params.target) -}} diff --git a/layouts/samples/single.html b/layouts/samples/single.html index 695a3328c34..a40820bc59a 100644 --- a/layouts/samples/single.html +++ b/layouts/samples/single.html @@ -1,13 +1,8 @@ -{{ define "left" }} - {{ partial "sidebar/mainnav.html" . }} - {{ partial "sidebar/sections.html" . }} -{{ end }} - -{{ define "main" }} - {{ partial "breadcrumbs.html" . }} -
    +{{ define "article" }} +
    + {{ partial "breadcrumbs.html" . }} {{ with .Title }} -

    {{ . }}

    +

    {{ . }}

    {{ end }} @@ -17,7 +12,7 @@

    {{ . }}

    - {{ range site.Data.samples.samples }} + {{ range hugo.Data.samples.samples }} {{ if in .services $.Params.service }} @@ -51,7 +46,3 @@

    Looking for more samples?

    {{ end }} - -{{ define "right" }} - {{ partial "aside.html" . }} -{{ end }} diff --git a/layouts/samples/single.markdown.md b/layouts/samples/single.markdown.md new file mode 100644 index 00000000000..c7936ffaeca --- /dev/null +++ b/layouts/samples/single.markdown.md @@ -0,0 +1,9 @@ +# {{ .Title }} + +| Name | Description | +|------|-------------| +{{- range hugo.Data.samples.samples }} +{{- if in .services $.Params.service }} +| [{{ .title }}]({{ .url }}) | {{ chomp .description }} | +{{- end }} +{{- end }} diff --git a/layouts/sbx-cli.html b/layouts/sbx-cli.html new file mode 100644 index 00000000000..0b10f5e0fa6 --- /dev/null +++ b/layouts/sbx-cli.html @@ -0,0 +1,179 @@ +{{ define "article" }} + {{ $data := index hugo.Data.sbx_cli .Params.datafile }} + + {{ .Store.Set "headings" slice }} +
    + {{ partial "breadcrumbs.html" . }} +
    +

    {{ .Title }}

    +
    + + {{- /* Summary table */ -}} +
    +
    {{ printf "[%s](%s)" .title .url | markdownify }}
    + + {{ with $data.synopsis }} + + + + + {{ end }} + {{ with $data.usage }} + + + + + {{ end }} + +
    Description{{ . }}
    Usage{{ . }}
    +
    + + {{- /* Description */ -}} + {{ with $data.description }} + {{ $heading := dict "level" 2 "text" "Description" }} + {{ partialCached "heading.html" $heading "sbx-cli-description" }} + {{ $.Store.Add "headings" $heading }} + {{ . | $.RenderString (dict "display" "block") }} + {{ end }} + + {{- /* Subcommands (for section pages) */ -}} + {{ if eq .Kind "section" }} + {{ $heading := dict "level" 2 "text" "Commands" }} + {{ partialCached "heading.html" $heading "sbx-cli-commands" }} + {{ $.Store.Add "headings" $heading }} + + + + + + + + + {{ range .Pages }} + {{ if .Params.datafile }} + {{ $child := index hugo.Data.sbx_cli .Params.datafile }} + + + + + {{ end }} + {{ end }} + +
    CommandDescription
    {{ .Title }}{{ $child.synopsis }}
    + {{ end }} + + {{- /* Options */ -}} + {{ with $data.options }} + {{ $opts := where . "name" "ne" "help" }} + {{ with $opts }} + {{ $heading := dict "level" 2 "text" "Options" }} + {{ partialCached "heading.html" $heading "sbx-cli-options" }} + {{ $.Store.Add "headings" $heading }} +
    + + + + + + + + + + {{ range . }} + + + {{ $skipDefault := `[],false,` }} + + + + {{ end }} + +
    OptionDefaultDescription
    + {{ with .shorthand }}-{{ . }}, {{ end }}--{{ .name }} + + {{ with .default_value }} + {{ cond (in $skipDefault .) "" (printf "%s" . | safeHTML) }} + {{ end }} + + {{ with .usage }} + {{ strings.TrimSpace . }} + {{ end }} +
    +
    + {{ end }} + {{ end }} + + {{- /* Inherited (global) options */ -}} + {{ with $data.inherited_options }} + {{ $opts := where . "name" "ne" "help" }} + {{ with $opts }} + {{ $heading := dict "level" 2 "text" "Global options" }} + {{ partialCached "heading.html" $heading "sbx-cli-global-options" }} + {{ $.Store.Add "headings" $heading }} +
    + + + + + + + + + + {{ range . }} + + + {{ $skipDefault := `[],false,` }} + + + + {{ end }} + +
    OptionDefaultDescription
    + {{ with .shorthand }}-{{ . }}, {{ end }}--{{ .name }} + + {{ with .default_value }} + {{ cond (in $skipDefault .) "" (printf "%s" . | safeHTML) }} + {{ end }} + + {{ with .usage }} + {{ strings.TrimSpace . }} + {{ end }} +
    +
    + {{ end }} + {{ end }} + + {{- /* Examples */ -}} + {{ with $data.example }} + {{ $heading := dict "level" 2 "text" "Examples" }} + {{ partialCached "heading.html" $heading "sbx-cli-examples" }} + {{ $.Store.Add "headings" $heading }} + {{- /* Dedent: strip up to 6 leading spaces from each line */ -}} + {{ $dedented := replaceRE `(?m)^ {2,6}` "" . }} + {{ $code := printf "```console\n%s\n```" (strings.TrimSpace $dedented) }} + {{ $code | $.RenderString (dict "display" "block") }} + {{ end }} + + +{{ end }} + +{{ define "right" }} + +{{ end }} diff --git a/layouts/security-announcements.rss.xml b/layouts/security-announcements.rss.xml new file mode 100644 index 00000000000..88134d7838a --- /dev/null +++ b/layouts/security-announcements.rss.xml @@ -0,0 +1,19 @@ +{{- $_ := .Content -}} + + + Docker Docs - Security Announcements + Docker security announcements and updates + {{ .Permalink }} + Hugo -- gohugo.io + {{ .Site.Language.Locale | default "en" }} + {{ now.Format "Mon, 02 Jan 2006 15:04:05 -0700" | safeHTML }} + + {{- range (index .Fragments.Headings 0).Headings }} + + {{ .Title }} + {{ $.Permalink }}#{{ .ID }} + security-{{ .ID }} + + {{- end }} + + diff --git a/layouts/series.html b/layouts/series.html new file mode 100644 index 00000000000..dc40a84774f --- /dev/null +++ b/layouts/series.html @@ -0,0 +1,47 @@ +{{ define "article" }} +
    + {{ partial "breadcrumbs.html" . }} +

    {{ .Title }}

    +
    + {{ partialCached "pagemeta.html" . . }} +
    +
    +
    {{ .Summary }}
    + {{- if or .Params.proficiencyLevel .Params.time .Params.prerequisites }} +
    +
    + {{- with .Params.proficiencyLevel }} +
    + Skill level + {{ . }} +
    + {{- end }} + {{- with .Params.time }} +
    + Time to complete + {{ . }} +
    + {{- end }} + {{- with .Params.prerequisites }} +
    + Prerequisites + {{ . }} +
    + {{- end }} +
    +
    + {{- end }} + {{ .Content }} + {{ partial "heading.html" (dict "text" "Modules" "level" 2) }} +
      + {{- range .Pages }} +
    1. + {{ .LinkTitle }} +

      {{ plainify .Description }}

      +
    2. + {{- end }} +
    +
    +{{ end }} + +{{ define "right" }}{{ end }} diff --git a/layouts/shortcodes/admin-domain-audit.md b/layouts/shortcodes/admin-domain-audit.md deleted file mode 100644 index 01e8da1179d..00000000000 --- a/layouts/shortcodes/admin-domain-audit.md +++ /dev/null @@ -1,29 +0,0 @@ -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $domain_navigation := "Select **My Hub**, your organization, **Settings**, and then **Security**." }} -{{ $sso_link := "[SSO](/security/for-admins/single-sign-on/)" }} -{{ $scim_link := "[SCIM](/security/for-admins/provisioning/scim/)" }} - -{{ if eq (.Get "product") "admin" }} - {{ $product_link = "the [Admin Console](https://admin.docker.com)" }} - {{ $domain_navigation = "Select your organization on the **Choose profile** page, and then select **Domain management**." }} - {{ $sso_link = "[SSO](/security/for-admins/single-sign-on/)" }} - {{ $scim_link = "[SCIM](/security/for-admins/provisioning/scim/)" }} -{{ end }} - -To audit your domains: - -1. Sign in to {{ $product_link }}. -2. {{ $domain_navigation }} -3. In **Domain Audit**, select **Export Users** to export a CSV file of uncaptured users with the following columns: - - - Name: The name of the user. - - Username: The Docker ID of the user. - - Email: The email address of the user. - -You can invite all the uncaptured users to your organization using the exported CSV file. For more details, see [Invite members](/admin/organization/members/). Optionally, enforce single sign-on or enable SCIM to add users to your organization automatically. For more details, see {{ $sso_link }} or {{ $scim_link }}. - -> [!NOTE] -> -> Domain audit may identify accounts of users who are no longer a part of your organization. If you don't want to add a user to your organization and you don't want the user to appear in future domain audits, the user must deactivate their account or update their associated email address. -> -> You can't deactivate an account or update an associated email address on behalf of a user. For more details, see [Deactivating an account](/manuals/accounts/deactivate-user-account.md). \ No newline at end of file diff --git a/layouts/shortcodes/admin-domains.html b/layouts/shortcodes/admin-domains.html deleted file mode 100644 index f3374f4c32d..00000000000 --- a/layouts/shortcodes/admin-domains.html +++ /dev/null @@ -1,35 +0,0 @@ -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $domain_navigation := `Navigate to the domain settings page for your organization. Select **My Hub**, your organization, **Settings**, and then **Security**.` }} - -{{ if eq (.Get "product") "admin" }} -{{ $product_link = "the [Admin Console](https://admin.docker.com)" }} -{{ $domain_navigation = "Select your organization or company in the left navigation drop-down menu, and then select **Domain management**. Note that when an organization is part of a company, you must select the company and configure the domain for that organization at the company level. Each organization in a company can have its own domain, but it must be configured at the company level." }} -{{ end }} - - -1. Sign in to {{ $product_link }}. -2. {{ $domain_navigation }} -3. Select **Add a domain**. -4. Continue with the on-screen instructions to get a verification code for - your domain as a **TXT Record Value**. - - > [!NOTE] - > - > Format your domains without protocol or www information, for example, - > `yourcompany.example`. 
This should include all email domains and - > subdomains users will use to access Docker, for example - > `yourcompany.example` and `us.yourcompany.example`. Public domains such as - > `gmail.com`, `outlook.com`, etc. aren’t permitted. - - > [!TIP] - > - > Make sure that the TXT record name that you create on your DNS matches - > the domain you registered on Docker in Step 4. For example, - > if you registered the subdomain `us.yourcompany.example`, - > you need to create a TXT record within the same name/zone `us`. - > A root domain such as `yourcompany.example` needs a TXT record on the - > root zone, which is typically denoted with the `@` name for the record. - -5. Once you have waited 72 hours for the TXT record verification, - you can then select **Verify** next to the domain you've added, - and follow the on-screen instructions. diff --git a/layouts/shortcodes/admin-image-access.html b/layouts/shortcodes/admin-image-access.html deleted file mode 100644 index a6d03ad3f5a..00000000000 --- a/layouts/shortcodes/admin-image-access.html +++ /dev/null @@ -1,27 +0,0 @@ -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $iam_navigation := "Select **My Hub**, select your organization in the left navigation drop-down menu, and then select **Image access**." }} - -{{ if eq (.Get "product") "admin" }} - {{ $product_link = "the [Admin Console](https://admin.docker.com)" }} - {{ $iam_navigation = "Select your organization in the left navigation drop-down menu, and then select **Image access**." }} -{{ end }} - -1. Sign in to {{ $product_link }}. -2. {{ $iam_navigation }} -3. Enable Image Access Management to set the permissions for the following categories of images you can manage: - - - **Organization Images**: Images from your organization are always allowed by default. These images can be public or private created by members within your organization. - - **Docker Official Images**: A curated set of Docker repositories hosted on Hub. They provide OS repositories, best practices for Dockerfiles, drop-in solutions, and applies security updates on time. - - **Docker Verified Publisher Images**: Images published by Docker partners that are part of the Verified Publisher program and are qualified to be included in the developer secure supply chain. - - **Community Images**: These images are disabled by default when Image Access Management is enabled because various users contribute them and they may pose security risks. This category includes Docker-Sponsored Open Source images. - - > [!NOTE] - > - > Image Access Management is turned off by default. However, owners in your organization have access to all images regardless of the settings. - -4. Select the category restrictions for your images by selecting **Allowed**. - Once the restrictions are applied, your members can view the organization permissions page in a read-only format. - -## Verify the restrictions - -The new Image Access Management policy takes effect after the developer successfully authenticates to Docker Desktop using their organization credentials. If a developer attempts to pull a disallowed image type using Docker, they receive an error message. 
diff --git a/layouts/shortcodes/admin-org-audit-log.html b/layouts/shortcodes/admin-org-audit-log.html deleted file mode 100644 index eaed5a99337..00000000000 --- a/layouts/shortcodes/admin-org-audit-log.html +++ /dev/null @@ -1,29 +0,0 @@ -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $audit_navigation := "Select **My Hub**, your organization, and then **Activity**." }} -{{ if eq (.Get "product") "admin" }} - {{ $product_link = "the [Admin Console](https://admin.docker.com)" }} - {{ $audit_navigation = "Select your organization in the left navigation drop-down menu, and then select **Activity logs**." }} -{{ end }} - -### View the activity logs - -To view the activity logs: - -1. Sign in to {{ $product_link }}. -2. {{ $audit_navigation }} - -> [!NOTE] -> -> Docker retains the activity data for a period of three months. - -### Customize the activity logs - -By default, all activities that occur are displayed on the **Activity** tab. Use the calendar option to select a date range and customize your results. After you have selected a date range, the activity logs of all the activities that occurred during that period are displayed. - -> [!NOTE] -> -> Activities created by the Docker Support team as part of resolving customer issues appear in the activity logs as **dockersupport**. - -Select the **All Activities** drop-down to view activities that are specific to an organization, repository, or billing. In Docker Hub, if you select the **Activities** tab from the **Repository** view, you can only filter repository-level activities. - -After choosing **Organization**, **Repository**, or **Billing**, you can further refine the results using the **All Actions** drop-down. diff --git a/layouts/shortcodes/admin-registry-access.html b/layouts/shortcodes/admin-registry-access.html deleted file mode 100644 index 7e15ad4d5ad..00000000000 --- a/layouts/shortcodes/admin-registry-access.html +++ /dev/null @@ -1,51 +0,0 @@ -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $ram_navigation := "Select **My Hub**, your organization, **Settings**, and then select **Registry Access**." }} -{{ if eq (.Get "product") "admin" }} - {{ $product_link = "the [Admin Console](https://admin.docker.com)" }} - {{ $ram_navigation = "Select your organization in the left navigation drop-down menu, and then select **Registry access**." }} -{{ end }} - -To configure Registry Access Management permissions, perform the following steps: - -1. Sign in to {{ $product_link }}. -2. {{ $ram_navigation }} -3. Enable Registry Access Management to set the permissions for your registry. - - > [!NOTE] - > - > When enabled, the Docker Hub registry is set by default; however you can - > also restrict this registry for your developers. - -4. Select **Add registry** and enter your registry details in the applicable -fields, and then select **Create** to add the registry to your list. You can -add up to 100 registries/domains. -5. Verify that the registry appears in your list and select **Save changes**. - -Once you add a registry, it can take up to 24 hours for the changes to be -enforced on your developers’ machines. - -If you want to apply the changes sooner, you must force a Docker signout on your -developers’ machine and have the developers re-authenticate for Docker Desktop. -See the [Caveats](#caveats) section below to learn more about limitations. - -> [!IMPORTANT] -> -> Starting with Docker Desktop version 4.36, you can enforce sign-in for -multiple organizations. 
If a developer belongs to multiple organizations with -different RAM policies, only the RAM policy for the first organization listed -in the `registry.json` file, `.plist` file, or registry key is enforced. - -> [!TIP] -> -> Since RAM sets policies about where content can be fetched from, the -[ADD](/reference/dockerfile/#add) instruction of the Dockerfile when the -parameter of the ADD instruction is a URL is also subject to registry -restrictions. -> -> If you're using ADD to fetch an image or artifact from a trusted registry via -> URL, make sure the registry's domain is included in your organzation's -> allowed registries list. -> -> RAM is not intended to restrict access to general-purpose external URLs, for -> example, package mirrors or storage services. Attempting to add too many domains -> may cause errors or hit system limits. diff --git a/layouts/shortcodes/admin-scim-disable.html b/layouts/shortcodes/admin-scim-disable.html deleted file mode 100644 index ea4d9cd476a..00000000000 --- a/layouts/shortcodes/admin-scim-disable.html +++ /dev/null @@ -1,13 +0,0 @@ -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $sso_navigation := `Navigate to the SSO settings page for your organization. - - Organization: Select **My Hub**, your organization, **Settings**, and then **Security**.` }} - -{{ if eq (.Get "product") "admin" }} -{{ $product_link = "the [Admin Console](https://admin.docker.com)" }} -{{ $sso_navigation = "Select your organization or company in the left navigation drop-down menu, and then select **SSO and SCIM.**" }} -{{ end }} - -1. Sign in to {{ $product_link }}. -2. {{ $sso_navigation }} -3. In the SSO connections table, select the **Actions** icon. -4. Select **Disable SCIM**. diff --git a/layouts/shortcodes/admin-scim.html b/layouts/shortcodes/admin-scim.html deleted file mode 100644 index 48300d8ff3d..00000000000 --- a/layouts/shortcodes/admin-scim.html +++ /dev/null @@ -1,13 +0,0 @@ -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $sso_navigation := `Navigate to the SSO settings page for your organization. - - Organization: Select **My Hub**, your organization, **Settings**, and then **Security**.` }} - -{{ if eq (.Get "product") "admin" }} -{{ $product_link = "the [Admin Console](https://admin.docker.com)" }} -{{ $sso_navigation = "Select your organization or company in the left navigation drop-down menu, and then select **SSO and SCIM.**" }} -{{ end }} - -1. Sign in to {{ $product_link }}. -2. {{ $sso_navigation }} -3. In the SSO connections table, select the **Actions** icon and **Setup SCIM**. -4. Copy the **SCIM Base URL** and **API Token** and paste the values into your IdP. diff --git a/layouts/shortcodes/admin-sso-config.md b/layouts/shortcodes/admin-sso-config.md deleted file mode 100644 index 7f205e74bff..00000000000 --- a/layouts/shortcodes/admin-sso-config.md +++ /dev/null @@ -1,36 +0,0 @@ -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $sso_navigation := `Navigate to the SSO settings page for your organization. Select **My Hub**, your organization, **Settings**, and then **Security**.` }} - -{{ if eq (.Get "product") "admin" }} - {{ $product_link = "the [Admin Console](https://admin.docker.com)" }} - {{ $sso_navigation = "Select your organization or company from the **Choose profile** page, and then select **SSO and SCIM**. Note that when an organization is part of a company, you must select the company and configure SSO for that organization at the company level. 
Each organization can have its own SSO configuration and domain, but it must be configured at the company level." }} -{{ end }} - -> [!IMPORTANT] -> -> If your IdP setup requires an Entity ID and the ACS URL, you must select the -> **SAML** tab in the **Authentication Method** section. For example, if your -> Entra ID (formerly Azure AD) Open ID Connect (OIDC) setup uses SAML configuration within Azure -> AD, you must select **SAML**. If you are [configuring Open ID Connect with Entra ID (formerly Azure AD)](https://docs.microsoft.com/en-us/powerapps/maker/portals/configure/configure-openid-settings) select -> **Azure AD (OIDC)** as the authentication method. Also, IdP initiated connections -> aren't supported at this time. - -After your domain is verified, create an SSO connection. - -1. Sign in to {{ $product_link }}. -2. {{ $sso_navigation }} -3. In the SSO connections table select **Create Connection**, and create a name for the connection. - - > [!NOTE] - > - > You have to verify at least one domain before creating the connections. - -4. Select an authentication method, **SAML** or **Azure AD (OIDC)**. -5. Copy the following fields to add to your IdP: - - - SAML: **Entity ID**, **ACS URL** - - Azure AD (OIDC): **Redirect URL** - - ![SAML](/docker-hub/images/saml-create-connection.png) - - ![Azure AD](/docker-hub/images/azure-create-connection.png) diff --git a/layouts/shortcodes/admin-sso-connect.md b/layouts/shortcodes/admin-sso-connect.md deleted file mode 100644 index 89ab466a256..00000000000 --- a/layouts/shortcodes/admin-sso-connect.md +++ /dev/null @@ -1,48 +0,0 @@ -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $sso_navigation := `Navigate to the SSO settings page for your organization. Select **My Hub**, your organization, **Settings**, and then **Security**.` }} - -{{ if eq (.Get "product") "admin" }} - {{ $product_link = "the [Admin Console](https://admin.docker.com)" }} - {{ $sso_navigation = "Select your organization or company from the **Choose profile** page, and then select **SSO and SCIM**. Note that when an organization is part of a company, you must select the company and configure SSO for that organization at the company level. Each organization can have its own SSO configuration and domain, but it must be configured at the company level." }} -{{ end }} - -1. In {{ $product_link }}, select the verified domains you want to apply the connection to. -2. To provision your users, select the organization(s) and/or team(s). -3. Review your summary and select **Create Connection**. - -## Test your SSO configuration - -After you’ve completed the SSO configuration process in Docker, you can test the configuration when you sign in to {{ $product_link }} using an incognito browser. Sign in to {{ $product_link }} using your domain email address. You are then redirected to your IdP's login page to authenticate. - -1. Authenticate through email instead of using your Docker ID, and test the login process. -2. To authenticate through CLI, your users must have a PAT before you enforce SSO for CLI users. - -> [!IMPORTANT] -> -> SSO has Just-in-Time (JIT) provisioning enabled by default, unless you have [disabled it](/security/for-admins/provisioning/just-in-time/#sso-authentication-with-jit-provisioning-disabled). This means your users are auto-provisioned to your organization. -> -> You can change this on a per-app basis. 
To prevent auto-provisioning users, you can create a security group in your IdP and configure the SSO app to authenticate and authorize only those users that are in the security group. Follow the instructions provided by your IdP: -> -> - [Okta](https://help.okta.com/en-us/Content/Topics/Security/policies/configure-app-signon-policies.htm) -> - [Entra ID (formerly Azure AD)](https://learn.microsoft.com/en-us/azure/active-directory/develop/howto-restrict-your-app-to-a-set-of-users) -> -> Alternatively, see [Manage how users are provisioned](/security/for-admins/single-sign-on/manage/). - -The SSO connection is now created. You can continue to set up SCIM without enforcing SSO log-in. For more information about setting up SCIM, see [Set up SCIM](/security/for-admins/provisioning/scim/). - -## Optional: Enforce SSO - -1. Sign in to {{ $product_link }}. -2. {{ $sso_navigation }} -3. In the SSO connections table, select the **Action** icon and then **Enable enforcement**. - - When SSO is enforced, your users are unable to modify their email address and password, convert a user account to an organization, or set up 2FA through Docker Hub. You must enable 2FA through your IdP. - -4. Continue with the on-screen instructions and verify that you’ve completed the tasks. -5. Select **Turn on enforcement** to complete. - -Your users must now sign in to Docker with SSO. - -> [!IMPORTANT] -> -> If SSO isn't enforced, users can choose to sign in with either their Docker ID or SSO. diff --git a/layouts/shortcodes/admin-sso-management-connections.md b/layouts/shortcodes/admin-sso-management-connections.md deleted file mode 100644 index a8759c36328..00000000000 --- a/layouts/shortcodes/admin-sso-management-connections.md +++ /dev/null @@ -1,27 +0,0 @@ -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $sso_navigation := `Navigate to the SSO settings page for your organization. Select **My Hub**, your organization, **Settings**, and then **Security**.` }} - -{{ if eq (.Get "product") "admin" }} - {{ $product_link = "the [Admin Console](https://app.docker.com/admin)" }} - {{ $sso_navigation = "Select your organization or company from the Choose profile page, and then select **SSO and SCIM**. Note that when an organization is part of a company, you must select the company and configure SSO for that organization at the company level. Each organization can have its own SSO configuration and domain, but it must be configured at the company level." }} -{{ end }} - -### Edit a connection - -1. Sign in to {{ $product_link }}. -2. {{ $sso_navigation }} -3. In the SSO connections table, select the **Action** icon. -4. Select **Edit connection**. -5. Follow the on-screen instructions to edit the connection. - -### Delete a connection - -1. Sign in to {{ $product_link }}. -2. {{ $sso_navigation }} -3. In the SSO connections table, select the **Action** icon. -4. Select **Delete connection**. -5. Follow the on-screen instructions to delete a connection. - -### Deleting SSO - -When you disable SSO, you can delete the connection to remove the configuration settings and the added domains. Once you delete this connection, it can't be undone. If an SSO connection is deleted, Docker users must authenticate with their Docker ID and password. 
\ No newline at end of file diff --git a/layouts/shortcodes/admin-sso-management-orgs.md b/layouts/shortcodes/admin-sso-management-orgs.md deleted file mode 100644 index 12406a00f41..00000000000 --- a/layouts/shortcodes/admin-sso-management-orgs.md +++ /dev/null @@ -1,26 +0,0 @@ -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $sso_navigation := "Select **My Hub**, your organization, and then **Settings**." }} -{{ if eq (.Get "product") "admin" }} - {{ $product_link = "the [Admin Console](https://app.docker.com/admin)" }} - {{ $sso_navigation = "Select your company from the **Choose profile** page, and then select **SSO and SCIM**." }} -{{ end }} - -### Connect an organization - -1. Sign in to {{ $product_link }}. -2. {{ $sso_navigation }} -3. In the SSO connections table, select the **Action** icon and then **Edit connection**. -4. Select **Next** to navigate to the section where connected organizations are listed. -5. In the **Organizations** drop-down, select the organization to add to the connection. -6. Select **Next** to confirm or change the default organization and team provisioning. -7. Review the **Connection Summary** and select **Update connection**. - -### Remove an organization - -1. Sign in to {{ $product_link }}. -2. {{ $sso_navigation }} -3. In the SSO connections table, select the **Action** icon and then **Edit connection**. -4. Select **Next** to navigate to the section where connected organizations are listed. -5. In the **Organizations** drop-down, select **Remove** to remove the connection. -6. Select **Next** to confirm or change the default organization and team provisioning. -7. Review the **Connection Summary** and select **Update connection**. diff --git a/layouts/shortcodes/admin-sso-management.md b/layouts/shortcodes/admin-sso-management.md deleted file mode 100644 index 9d04bebfea8..00000000000 --- a/layouts/shortcodes/admin-sso-management.md +++ /dev/null @@ -1,26 +0,0 @@ -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $sso_navigation := `Navigate to the SSO settings page for your organization. Select **My Hub**, your organization, **Settings**, and then **Security**.` }} - -{{ if eq (.Get "product") "admin" }} - {{ $product_link = "the [Admin Console](https://app.docker.com/admin)" }} - {{ $sso_navigation = "Select your organization or company from the **Choose profile** page, and then select **SSO and SCIM**." }} -{{ end }} - -### Remove a domain from an SSO connection - -> [!IMPORTANT] -> -> Docker supports multiple IdP configurations, where a single domain is used for multiple SSO identity providers. If you want to remove a domain from multiple SSO connections, you must remove it from each connection individually. - -1. Sign in to {{ $product_link }}. -2. {{ $sso_navigation }} -3. In the SSO connections table, select the **Action** icon and then **Edit connection**. -4. Select **Next** to navigate to the section where the connected domains are listed. -5. In the **Domain** drop-down, select the **x** icon next to the domain that you want to remove. -6. Select **Next** to confirm or change the connected organization(s). -7. Select **Next** to confirm or change the default organization and team provisioning selections. -8. Review the **Connection Summary** and select **Update connection**. - -> [!NOTE] -> -> If you want to re-add the domain, a new TXT record value is assigned. You must then complete the verification steps with the new TXT record value. 
diff --git a/layouts/shortcodes/admin-users.html b/layouts/shortcodes/admin-users.html deleted file mode 100644 index f305201c23d..00000000000 --- a/layouts/shortcodes/admin-users.html +++ /dev/null @@ -1,131 +0,0 @@ -{{ $invite_button := "**Invite members**" }} -{{ $export_button := "**Export members**" }} -{{ $member_navigation := "Select **My Hub**, your organization, and then **Members**." }} -{{ $remove_button := "**Remove member**" }} -{{ $product_link := "[Docker Hub](https://hub.docker.com)" }} -{{ $role_mapping_link := "[SCIM for role mapping](/security/for-admins/provisioning/scim/)" }} -{{ $export_fields := `The CSV file for an organization contains the following fields: -* **Name**: The user's name. -* **Username**: The user's Docker ID. -* **Email**: The user's email address. -* **Type**: The type of user. For example, **Invitee** for users who have not accepted the organization's invite, - or **User** for users who are members of the organization. -* **Role**: The user's role in the organization. For example, **Member** or **Owner**. -* **Teams**: The teams where the user is a member. A team is not listed for invitees. -* **Date Joined**: The time and date when the user was invited to the organization.` }} - -{{ if eq (.Get "product") "admin" }} -{{ $invite_button = "**Invite**" }} -{{ $export_button = "the **Action** icon and then select **Export users as CSV**" }} -{{ $member_navigation = "Select your organization from the **Choose profile** page, and then select **Members**." }} -{{ $remove_button = "**Remove member**" }} -{{ $product_link = "the [Admin Console](https://admin.docker.com)" }} -{{ $role_mapping_link = "[SCIM for role mapping](/security/for-admins/provisioning/scim/)" }} -{{ if eq (.Get "layer") "company" }} -{{ $export_fields = `The CSV file for a company contains the following fields: -* **Name**: The user's name. -* **Username**: The user's Docker ID. -* **Email**: The user's email address. -* **Member of Organizations**: All organizations the user is a member of within a company. -* **Invited to Organizations**: All organizations the user is an invitee of within a company. -* **Account Created**: The time and date when the user account was created.` }} -{{ $member_navigation = "Select your organization from the **Choose profile** page, and then select **Members**." }} -{{ $remove_button = "**Remove user**" }} -{{ $role_mapping_link = "[SCIM for role mapping](/security/for-admins/provisioning/scim/)"}} -{{ end }} -{{ end }} - -Owners can invite new members to an organization via Docker ID, email address, or with a CSV file containing email -addresses. If an invitee does not have a Docker account, they must create an account and verify their email address -before they can accept an invitation to join the organization. When inviting members, their pending invitation occupies -a seat. - -### Invite members via Docker ID or email address - -Use the following steps to invite members to your organization via Docker ID or email address. To invite a large amount -of members to your organization via CSV file, see the next section. - -1. Sign in to {{ $product_link }}. -2. {{ $member_navigation }} -3. Select {{ $invite_button }}. -4. Select **Emails or usernames**. -5. Follow the on-screen instructions to invite members. - Invite a maximum of 1000 members and separate multiple entries by comma, semicolon, or space. - - > [!NOTE] - > - > When you invite members, you assign them a role. 
- > See [Roles and permissions](/security/for-admins/roles-and-permissions/) - > for details about the access permissions for each role. - - Pending invitations appear in the table. The invitees receive an email with a link to Docker Hub where they can accept - or decline the invitation. - -### Invite members via CSV file - -To invite multiple members to an organization via a CSV file containing email addresses: - -1. Sign in to {{ $product_link }}. -2. {{ $member_navigation }} -3. Select {{ $invite_button }}. -4. Select **CSV upload**. -5. Select **Download the template CSV file** to optionally download an example CSV file. - The following is an example of the contents of a valid CSV file. - - ```text - email - docker.user-0@example.com - docker.user-1@example.com - ``` - - CSV file requirements: - - - The file must contain a header row with at least one heading named `email`. Additional columns are allowed and are - ignored in the import. - - The file must contain a maximum of 1000 email addresses (rows). To invite more than 1000 users, create multiple CSV - files and perform all steps in this task for each file. - -6. Create a new CSV file or export a CSV file from another application. - - - To export a CSV file from another application, see the application’s documentation. - - To create a new CSV file, open a new file in a text editor, type `email` on the first line, type the user email - addresses one per line on the following lines, and then save the file with a .csv extension. - -7. Select **Browse files** and then select your CSV file, or drag and drop the CSV file into the **Select a CSV file to - upload** box. You can only select one CSV file at a time. - - > [!NOTE] - > - > If the amount of email addresses in your CSV file exceeds the number of available seats in your organization, you - > cannot continue to invite members. To invite members, you can purchase more seats, or remove some email addresses from - > the CSV file and re-select the new file. To purchase more seats, see [Add seats to your - > subscription](/subscription/add-seats/) or [Contact sales](https://www.docker.com/pricing/contact-sales/). - -8. After the CSV file has been uploaded, select **Review**. - - Valid email addresses and any email addresses that have issues appear. - Email addresses may have the following issues: - - - Invalid email: The email address is not a valid address. - The email address will be ignored if you send invites. - You can correct the email address in the CSV file and re-import the file. - - Already invited: The user has already been sent an invite email and another invite email will not be sent. - - Member: The user is already a member of your organization and an invite email will not be sent. - - Duplicate: The CSV file has multiple occurrences of the same email address. - The user will be sent only one invite email. - -9. Follow the on-screen instructions to invite members. - - > [!NOTE] - > - > When you invite members, you assign them a role. - > See [Roles and permissions](/security/for-admins/roles-and-permissions/) - > for details about the access permissions for each role. - -Pending invitations appear in the table. The invitees receive an email with a link to Docker Hub where they can accept -or decline the invitation. - -### Invite members via API - -You can bulk invite members using the Docker Hub API. For more information, see -the [Bulk create invites](https://docs.docker.com/reference/api/hub/latest/#tag/invites/paths/~1v2~1invites~1bulk/post) API endpoint. 
\ No newline at end of file diff --git a/layouts/shortcodes/button.html b/layouts/shortcodes/button.html deleted file mode 100644 index d663143b484..00000000000 --- a/layouts/shortcodes/button.html +++ /dev/null @@ -1,14 +0,0 @@ -{{ $text := .Get "text" }} -{{ $url := .Get "url" }} -{{- if (strings.HasPrefix $url "http") -}} - {{ $url = $url | safeURL }} -{{- else if (strings.FindRE `([^_]|^)index.md` $url 1) -}} - {{ $url = ref .Page (strings.Replace $url "index.md" "_index.md") }} -{{- else -}} - {{ $url = ref .Page $url }} -{{- end -}} - diff --git a/layouts/shortcodes/card.html b/layouts/shortcodes/card.html deleted file mode 100644 index 6df8788b68e..00000000000 --- a/layouts/shortcodes/card.html +++ /dev/null @@ -1,9 +0,0 @@ -{{ partial "components/card.html" - (dict - "description" (.Get "description") - "link" (.Get "link") - "title" (.Get "title") - "icon" (.Get "icon") - "image" (.Get "image") - ) -}} diff --git a/layouts/shortcodes/cta.html b/layouts/shortcodes/cta.html deleted file mode 100644 index 754defc5c10..00000000000 --- a/layouts/shortcodes/cta.html +++ /dev/null @@ -1,10 +0,0 @@ - - diff --git a/layouts/shortcodes/desktop-install-v2.html b/layouts/shortcodes/desktop-install-v2.html deleted file mode 100644 index e2cf1dac9e8..00000000000 --- a/layouts/shortcodes/desktop-install-v2.html +++ /dev/null @@ -1,46 +0,0 @@ -{{- $all := .Get "all" -}} -{{- $win := .Get "win" -}} -{{- $beta_win_arm := .Get "beta_win_arm" -}} -{{- $mac := .Get "mac" -}} -{{- $linux := .Get "linux" -}} -{{- $build_path := .Get "build_path" -}} -
    -

    Download Docker Desktop

    -

    - {{- if or $all $win }} - Windows - (checksum) | - {{ end }} - {{- if or $beta_win_arm }} - Windows ARM Beta - (checksum) | - {{ end }} - {{- if or $all $mac }} - Mac - with Apple chip - (checksum) | - Mac - with Intel chip - (checksum) - {{ end -}} - {{- if or $all $linux }} - | - Debian - - - RPM - - - Arch - (checksum) - {{- end -}} -

    -
    diff --git a/layouts/shortcodes/grid.html b/layouts/shortcodes/grid.html deleted file mode 100644 index de1a165e755..00000000000 --- a/layouts/shortcodes/grid.html +++ /dev/null @@ -1,8 +0,0 @@ -{{ $cols := .Get "cols" | default 3 }} -
    - {{ $items := index .Page.Params (.Get "items" | default "grid") }} - {{ range $items }} - {{ $opts := dict "title" .title "link" .link "description" .description "icon" .icon "image" .image }} - {{ partial "components/card.html" $opts }} - {{ end }} -
    diff --git a/layouts/shortcodes/release-date.html b/layouts/shortcodes/release-date.html deleted file mode 100644 index 884f4cd7b37..00000000000 --- a/layouts/shortcodes/release-date.html +++ /dev/null @@ -1,3 +0,0 @@ -{{ .Get "date" }} diff --git a/layouts/shortcodes/tabs.html b/layouts/shortcodes/tabs.html deleted file mode 100644 index 3f1d845fe91..00000000000 --- a/layouts/shortcodes/tabs.html +++ /dev/null @@ -1,50 +0,0 @@ -{{ with .Inner }}{{/* don't do anything, just call it */}}{{ end }} -{{ $first := urlize (index (.Store.Get "tabs") 0).name }} -{{ $group := .Get "group" }} -{{ $groupID := fmt.Printf "tabgroup-%s" (urlize $group) }} -{{ $persist := .Get "persist" }} - -
    -
    - {{ range (.Store.Get "tabs") }} - - {{ end }} -
    -
    -
    - {{ range (.Store.Get "tabs") }} -
    - {{ .content | page.RenderString (dict "display" "block") }} -
    - {{ end }} -
    -
    -
    diff --git a/layouts/single.html b/layouts/single.html new file mode 100644 index 00000000000..834974e186e --- /dev/null +++ b/layouts/single.html @@ -0,0 +1,5 @@ +{{ define "article" }} +
    + {{ partial "content-default.html" . }} +
    +{{ end }} diff --git a/layouts/single.markdown.md b/layouts/single.markdown.md new file mode 100644 index 00000000000..54f30713767 --- /dev/null +++ b/layouts/single.markdown.md @@ -0,0 +1,3 @@ +# {{ .Title }} + +{{ .RenderShortcodes }} diff --git a/layouts/tag/taxonomy.html b/layouts/tag/taxonomy.html deleted file mode 100644 index aa642acf7a5..00000000000 --- a/layouts/tag/taxonomy.html +++ /dev/null @@ -1,23 +0,0 @@ -{{ define "left" }} - {{ partial "sidebar/mainnav.html" . }} - {{ partial "sidebar/tags.html" . }} -{{ end }} - -{{ define "main" }} -
    - {{ partial "breadcrumbs.html" . }} -

    - {{ partialCached "icon" "tag" "tag" }} - {{ .Title }} -

    - {{ .Content }} -
      - {{ range site.Taxonomies.tags }} -
    • - {{ .Page.Title }} - ({{ (len .Pages) }} {{ cond (gt (len .Pages) 1) "pages" "page" }}) -
    • - {{ end }} -
    -
    -{{ end }} diff --git a/layouts/tag/term.html b/layouts/tag/term.html deleted file mode 100644 index 0f46121ace5..00000000000 --- a/layouts/tag/term.html +++ /dev/null @@ -1,25 +0,0 @@ -{{ define "left" }} - {{ partial "sidebar/mainnav.html" . }} - {{ partial "sidebar/tags.html" . }} -{{ end }} - -{{ define "main" }} -
    - {{ partial "breadcrumbs.html" . }} -

    - {{ partialCached "icon" "tag" "tag" }} - {{ .Title }} -

    - {{ .Content }} - {{- range .Pages.GroupBy "Type" }} -

    {{ (site.GetPage .Key).LinkTitle }}

    - - {{- end }} -
    -{{ end }} diff --git a/layouts/wide.html b/layouts/wide.html new file mode 100644 index 00000000000..d31aab71937 --- /dev/null +++ b/layouts/wide.html @@ -0,0 +1 @@ +{{ define "right" }}{{ end }} diff --git a/netlify.toml b/netlify.toml index 5fa86c689e7..5edd78ad365 100644 --- a/netlify.toml +++ b/netlify.toml @@ -2,11 +2,12 @@ publish = "public" [context.deploy-preview.environment] -NODE_VERSION = "22" +NODE_VERSION = "24" NODE_ENV = "production" -HUGO_VERSION = "0.141.0" +HUGO_VERSION = "0.161.1" HUGO_ENABLEGITINFO = "true" HUGO_ENVIRONMENT = "preview" +SECRETS_SCAN_OMIT_PATHS = "public/contribute/file-conventions/index.html" [context.deploy-preview] -command = "hugo --gc --minify -b $DEPLOY_PRIME_URL && npx pagefind@v1.3.0" +command = "hugo --gc --minify -b $DEPLOY_PRIME_URL && ./hack/flatten-and-resolve.js && npx pagefind@v1.5.2" diff --git a/package-lock.json b/package-lock.json index e0c4dcdd4ee..8e1ba3b84c8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -9,49 +9,35 @@ "version": "0.0.1", "license": "Apache License 2.0", "dependencies": { - "@alpinejs/collapse": "^3.14.3", - "@alpinejs/focus": "^3.14.3", - "@alpinejs/persist": "^3.14.3", - "@floating-ui/dom": "^1.6.12", - "@material-symbols/svg-400": "^0.23.0", - "@tailwindcss/nesting": "^0.0.0-insiders.565cd3e", - "@tailwindcss/typography": "^0.5.15", - "alpinejs": "^3.14.3", - "autoprefixer": "^10.4.20", - "postcss": "^8.4.49", - "postcss-cli": "^11.0.0", - "postcss-import": "^16.1.0", - "tailwindcss": "^3.4.15" + "@alpinejs/collapse": "3.15.8", + "@alpinejs/focus": "3.15.8", + "@alpinejs/persist": "3.15.8", + "@floating-ui/dom": "1.7.6", + "@material-symbols/svg-400": "0.40.2", + "@tailwindcss/cli": "4.2.1", + "@tailwindcss/typography": "0.5.19", + "alpinejs": "3.15.8", + "highlight.js": "11.11.1", + "marked": "17.0.4", + "tailwindcss": "4.2.1" }, "devDependencies": { - "markdownlint": "^0.35.0", - "prettier": "^3.3.3", - "prettier-plugin-go-template": "^0.0.15", - "prettier-plugin-tailwindcss": "^0.6.8" - } - }, - "node_modules/@alloc/quick-lru": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", - "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "markdownlint": "0.40.0", + "prettier": "3.8.1", + "prettier-plugin-go-template": "0.0.15", + "prettier-plugin-tailwindcss": "0.7.2" } }, "node_modules/@alpinejs/collapse": { - "version": "3.14.3", - "resolved": "https://registry.npmjs.org/@alpinejs/collapse/-/collapse-3.14.3.tgz", - "integrity": "sha512-gqyzDLf6i6GPzqJROClVXpq10q/5gYfmcnmBs+UGHkHHeu7VYkMJUzH/ukPmbRmnJnZ2xkjEZz6yRrjun6N88A==", + "version": "3.15.8", + "resolved": "https://registry.npmjs.org/@alpinejs/collapse/-/collapse-3.15.8.tgz", + "integrity": "sha512-zZhD8DHdHuzGFe8+cHNH99K//oFutzKwcy6vagydb3KFlTzmqxTnHZo5sSV81lAazhV7qKsYCKtNV14tR9QkJw==", "license": "MIT" }, "node_modules/@alpinejs/focus": { - "version": "3.14.3", - "resolved": "https://registry.npmjs.org/@alpinejs/focus/-/focus-3.14.3.tgz", - "integrity": "sha512-ZBL6HziMXhQIuta3PQjpYaMb5Ro9VPqh0mkP+d1uefJnhliBMWUfQXOnobV/0zJUB9pDxzd78diDX3ywewoJ3g==", + "version": "3.15.8", + "resolved": "https://registry.npmjs.org/@alpinejs/focus/-/focus-3.15.8.tgz", + "integrity": "sha512-YPF7jtaMFqrWI7xLjathWe/Hi/dXLReGoeZZxJZCW6pgmbFaVKqUFhl3DA+kUPSYsEhh9xk2wr9yoTZ2aB8xew==", "license": "MIT", "dependencies": { 
"focus-trap": "^6.9.4", @@ -59,65 +45,54 @@ } }, "node_modules/@alpinejs/persist": { - "version": "3.14.3", - "resolved": "https://registry.npmjs.org/@alpinejs/persist/-/persist-3.14.3.tgz", - "integrity": "sha512-atWMKAHdDBKxHoiAEVAx9Ft+d1m9BzsepTkPZwrJmLNYPoyXnpUp1NrPLiSLB6FzUFRPoIV0tMF2kgnRwzp+vA==", + "version": "3.15.8", + "resolved": "https://registry.npmjs.org/@alpinejs/persist/-/persist-3.15.8.tgz", + "integrity": "sha512-xKJk0aa5p0QAquP9ivEuLnpzuBC2MyoWmiMRVzWiTR9/VLnbVgFM+6dE/D+NvfKOzhTSeht6FkYqOi/C27LBTg==", "license": "MIT" }, "node_modules/@floating-ui/core": { - "version": "1.6.8", - "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.8.tgz", - "integrity": "sha512-7XJ9cPU+yI2QeLS+FCSlqNFZJq8arvswefkZrYI1yQBbftw6FyrZOxYSh+9S7z7TpeWlRt9zJ5IhM1WIL334jA==", + "version": "1.7.5", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.5.tgz", + "integrity": "sha512-1Ih4WTWyw0+lKyFMcBHGbb5U5FtuHJuujoyyr5zTaWS5EYMeT6Jb2AuDeftsCsEuchO+mM2ij5+q9crhydzLhQ==", "license": "MIT", "dependencies": { - "@floating-ui/utils": "^0.2.8" + "@floating-ui/utils": "^0.2.11" } }, "node_modules/@floating-ui/dom": { - "version": "1.6.12", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.12.tgz", - "integrity": "sha512-NP83c0HjokcGVEMeoStg317VD9W7eDlGK7457dMBANbKA6GJZdc7rjujdgqzTaz93jkGgc5P/jeWbaCHnMNc+w==", + "version": "1.7.6", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.6.tgz", + "integrity": "sha512-9gZSAI5XM36880PPMm//9dfiEngYoC6Am2izES1FF406YFsjvyBMmeJ2g4SAju3xWwtuynNRFL2s9hgxpLI5SQ==", "license": "MIT", "dependencies": { - "@floating-ui/core": "^1.6.0", - "@floating-ui/utils": "^0.2.8" + "@floating-ui/core": "^1.7.5", + "@floating-ui/utils": "^0.2.11" } }, "node_modules/@floating-ui/utils": { - "version": "0.2.8", - "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.8.tgz", - "integrity": "sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig==", + "version": "0.2.11", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.11.tgz", + "integrity": "sha512-RiB/yIh78pcIxl6lLMG0CgBXAZ2Y0eVHqMPYugu+9U0AeT6YBeiJpf7lbdJNIugFP5SIjwNRgo4DhR1Qxi26Gg==", "license": "MIT" }, - "node_modules/@isaacs/cliui": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", - "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", - "license": "ISC", + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "license": "MIT", "dependencies": { - "string-width": "^5.1.2", - "string-width-cjs": "npm:string-width@^4.2.0", - "strip-ansi": "^7.0.1", - "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", - "wrap-ansi": "^8.1.0", - "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" - }, - "engines": { - "node": ">=12" + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" } }, - "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", - "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": 
"https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", "license": "MIT", "dependencies": { - "@jridgewell/set-array": "^1.2.1", - "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" } }, "node_modules/@jridgewell/resolve-uri": { @@ -129,25 +104,16 @@ "node": ">=6.0.0" } }, - "node_modules/@jridgewell/set-array": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", - "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", - "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", "license": "MIT" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.25", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", - "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", @@ -155,715 +121,822 @@ } }, "node_modules/@material-symbols/svg-400": { - "version": "0.23.0", - "resolved": "https://registry.npmjs.org/@material-symbols/svg-400/-/svg-400-0.23.0.tgz", - "integrity": "sha512-bsvGmBds729rZYOCOgxs4FjpktXfkhcprsCNPv+PRIDE3K/b30bnOsFAylUUfJ6cnHoXucS672VdNe80YIUxwA==", + "version": "0.40.2", + "resolved": "https://registry.npmjs.org/@material-symbols/svg-400/-/svg-400-0.40.2.tgz", + "integrity": "sha512-e2yEgZW/OveVT1sGaZW1kkRWTPVghjsJYWy+vIea3q08Fv2o7FCYv23PESMyr5D4AaAXdM5dKWkF1e6yIm4swA==", "license": "Apache-2.0" }, - "node_modules/@nodelib/fs.scandir": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "node_modules/@parcel/watcher": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher/-/watcher-2.5.1.tgz", + "integrity": "sha512-dfUnCxiN9H4ap84DvD2ubjw+3vUNpstxa0TneY/Paat8a3R4uQZDLSvWjmznAY/DoahqTHl9V46HF/Zs3F29pg==", + "hasInstallScript": true, "license": "MIT", "dependencies": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" + "detect-libc": "^1.0.3", + "is-glob": "^4.0.3", + "micromatch": "^4.0.5", + "node-addon-api": "^7.0.0" }, "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.stat": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": 
"sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "@parcel/watcher-android-arm64": "2.5.1", + "@parcel/watcher-darwin-arm64": "2.5.1", + "@parcel/watcher-darwin-x64": "2.5.1", + "@parcel/watcher-freebsd-x64": "2.5.1", + "@parcel/watcher-linux-arm-glibc": "2.5.1", + "@parcel/watcher-linux-arm-musl": "2.5.1", + "@parcel/watcher-linux-arm64-glibc": "2.5.1", + "@parcel/watcher-linux-arm64-musl": "2.5.1", + "@parcel/watcher-linux-x64-glibc": "2.5.1", + "@parcel/watcher-linux-x64-musl": "2.5.1", + "@parcel/watcher-win32-arm64": "2.5.1", + "@parcel/watcher-win32-ia32": "2.5.1", + "@parcel/watcher-win32-x64": "2.5.1" + } + }, + "node_modules/@parcel/watcher-android-arm64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.1.tgz", + "integrity": "sha512-KF8+j9nNbUN8vzOFDpRMsaKBHZ/mcjEjMToVMJOhTozkDonQFFrRcfdLWn6yWKCmJKmdVxSgHiYvTCef4/qcBA==", + "cpu": [ + "arm64" + ], "license": "MIT", + "optional": true, + "os": [ + "android" + ], "engines": { - "node": ">= 8" + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/@nodelib/fs.walk": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "node_modules/@parcel/watcher-darwin-arm64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.1.tgz", + "integrity": "sha512-eAzPv5osDmZyBhou8PoF4i6RQXAfeKL9tjb3QzYuccXFMQU0ruIc/POh30ePnaOyD1UXdlKguHBmsTs53tVoPw==", + "cpu": [ + "arm64" + ], "license": "MIT", - "dependencies": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - }, + "optional": true, + "os": [ + "darwin" + ], "engines": { - "node": ">= 8" + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/@pkgjs/parseargs": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", - "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "node_modules/@parcel/watcher-darwin-x64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.1.tgz", + "integrity": "sha512-1ZXDthrnNmwv10A0/3AJNZ9JGlzrF82i3gNQcWOzd7nJ8aj+ILyW1MTxVk35Db0u91oD5Nlk9MBiujMlwmeXZg==", + "cpu": [ + "x64" + ], "license": "MIT", "optional": true, + "os": [ + "darwin" + ], "engines": { - "node": ">=14" + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/@sindresorhus/merge-streams": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-2.3.0.tgz", - "integrity": "sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg==", + "node_modules/@parcel/watcher-freebsd-x64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.1.tgz", + "integrity": 
"sha512-SI4eljM7Flp9yPuKi8W0ird8TI/JK6CSxju3NojVI6BjHsTyK7zxA9urjVjEKJ5MBYC+bLmMcbAWlZ+rFkLpJQ==", + "cpu": [ + "x64" + ], "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], "engines": { - "node": ">=18" + "node": ">= 10.0.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/@tailwindcss/nesting": { - "version": "0.0.0-insiders.565cd3e", - "resolved": "https://registry.npmjs.org/@tailwindcss/nesting/-/nesting-0.0.0-insiders.565cd3e.tgz", - "integrity": "sha512-WhHoFBx19TnH/c+xLwT/sxei6+4RpdfiyG3MYXfmLaMsADmVqBkF7B6lDalgZD9YdM459MF7DtxVbWkOrV7IaQ==", + "node_modules/@parcel/watcher-linux-arm-glibc": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.1.tgz", + "integrity": "sha512-RCdZlEyTs8geyBkkcnPWvtXLY44BCeZKmGYRtSgtwwnHR4dxfHRG3gR99XdMEdQ7KeiDdasJwwvNSF5jKtDwdA==", + "cpu": [ + "arm" + ], "license": "MIT", - "dependencies": { - "postcss-nested": "^5.0.5" + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" }, - "peerDependencies": { - "postcss": "^8.2.15" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/@tailwindcss/typography": { - "version": "0.5.15", - "resolved": "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.5.15.tgz", - "integrity": "sha512-AqhlCXl+8grUz8uqExv5OTtgpjuVIwFTSXTrh8y9/pw6q2ek7fJ+Y8ZEVw7EB2DCcuCOtEjf9w3+J3rzts01uA==", + "node_modules/@parcel/watcher-linux-arm-musl": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.1.tgz", + "integrity": "sha512-6E+m/Mm1t1yhB8X412stiKFG3XykmgdIOqhjWj+VL8oHkKABfu/gjFj8DvLrYVHSBNC+/u5PeNrujiSQ1zwd1Q==", + "cpu": [ + "arm" + ], "license": "MIT", - "dependencies": { - "lodash.castarray": "^4.4.0", - "lodash.isplainobject": "^4.0.6", - "lodash.merge": "^4.6.2", - "postcss-selector-parser": "6.0.10" + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" }, - "peerDependencies": { - "tailwindcss": ">=3.0.0 || insiders || >=4.0.0-alpha.20" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/@vue/reactivity": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.1.5.tgz", - "integrity": "sha512-1tdfLmNjWG6t/CsPldh+foumYFo3cpyCHgBYQ34ylaMsJ+SNHQ1kApMIa8jN+i593zQuaw3AdWH0nJTARzCFhg==", + "node_modules/@parcel/watcher-linux-arm64-glibc": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.1.tgz", + "integrity": "sha512-LrGp+f02yU3BN9A+DGuY3v3bmnFUggAITBGriZHUREfNEzZh/GO06FF5u2kx8x+GBEUYfyTGamol4j3m9ANe8w==", + "cpu": [ + "arm64" + ], "license": "MIT", - "dependencies": { - "@vue/shared": "3.1.5" + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/@vue/shared": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.1.5.tgz", - "integrity": "sha512-oJ4F3TnvpXaQwZJNF3ZK+kLPHKarDmJjJ6jyzVNDKH9md1dptjC7lWR//jrGuLdek/U6iltWxqAnYOu8gCiOvA==", - "license": "MIT" - }, - "node_modules/alpinejs": { - "version": "3.14.3", - "resolved": "https://registry.npmjs.org/alpinejs/-/alpinejs-3.14.3.tgz", - 
"integrity": "sha512-cL8JBEDAm4UeVjTN5QnFl8QgMGUwxFn1GvQvu3RtfAHUrAPRahGihrsWpKnEK9L0QMqsAPk/R8MylMWKHaK33A==", + "node_modules/@parcel/watcher-linux-arm64-musl": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.1.tgz", + "integrity": "sha512-cFOjABi92pMYRXS7AcQv9/M1YuKRw8SZniCDw0ssQb/noPkRzA+HBDkwmyOJYp5wXcsTrhxO0zq1U11cK9jsFg==", + "cpu": [ + "arm64" + ], "license": "MIT", - "dependencies": { - "@vue/reactivity": "~3.1.1" + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/ansi-regex": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", - "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "node_modules/@parcel/watcher-linux-x64-glibc": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.1.tgz", + "integrity": "sha512-GcESn8NZySmfwlTsIur+49yDqSny2IhPeZfXunQi48DMugKeZ7uy1FX83pO0X22sHntJ4Ub+9k34XQCX+oHt2A==", + "cpu": [ + "x64" + ], "license": "MIT", + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=12" + "node": ">= 10.0.0" }, "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "node_modules/@parcel/watcher-linux-x64-musl": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.5.1.tgz", + "integrity": "sha512-n0E2EQbatQ3bXhcH2D1XIAANAcTZkQICBPVaxMeaCVBtOpBZpWJuf7LwyWPSBDITb7In8mqQgJ7gH8CILCURXg==", + "cpu": [ + "x64" + ], "license": "MIT", + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=12" + "node": ">= 10.0.0" }, "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/any-promise": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", - "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", - "license": "MIT" - }, - "node_modules/anymatch": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", - "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", - "license": "ISC", - "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - }, + "node_modules/@parcel/watcher-win32-arm64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.1.tgz", + "integrity": "sha512-RFzklRvmc3PkjKjry3hLF9wD7ppR4AKcWNzH7kXR7GUe0Igb3Nz8fyPwtZCSquGrhU5HhUNDr/mKBqj7tqA2Vw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], "engines": { - "node": ">= 8" + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/arg": { - "version": "5.0.2", - 
"resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", - "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", - "license": "MIT" - }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true, - "license": "Python-2.0" - }, - "node_modules/autoprefixer": { - "version": "10.4.20", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", - "integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/autoprefixer" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } + "node_modules/@parcel/watcher-win32-ia32": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.1.tgz", + "integrity": "sha512-c2KkcVN+NJmuA7CGlaGD1qJh1cLfDnQsHjE89E60vUEMlqduHGCdCLJCID5geFVM0dOtA3ZiIO8BoEQmzQVfpQ==", + "cpu": [ + "ia32" ], "license": "MIT", - "dependencies": { - "browserslist": "^4.23.3", - "caniuse-lite": "^1.0.30001646", - "fraction.js": "^4.3.7", - "normalize-range": "^0.1.2", - "picocolors": "^1.0.1", - "postcss-value-parser": "^4.2.0" - }, - "bin": { - "autoprefixer": "bin/autoprefixer" - }, + "optional": true, + "os": [ + "win32" + ], "engines": { - "node": "^10 || ^12 || >=14" + "node": ">= 10.0.0" }, - "peerDependencies": { - "postcss": "^8.1.0" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "license": "MIT" - }, - "node_modules/binary-extensions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", - "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "node_modules/@parcel/watcher-win32-x64": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.1.tgz", + "integrity": "sha512-9lHBdJITeNR++EvSQVUcaZoWupyHfXe1jZvGZ06O/5MflPcuPLtEphScIBL+AiCWBO46tDSHzWyD0uDmmZqsgA==", + "cpu": [ + "x64" + ], "license": "MIT", + "optional": true, + "os": [ + "win32" + ], "engines": { - "node": ">=8" + "node": ">= 10.0.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "node_modules/@tailwindcss/cli": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/cli/-/cli-4.2.1.tgz", + "integrity": "sha512-b7MGn51IA80oSG+7fuAgzfQ+7pZBgjzbqwmiv6NO7/+a1sev32cGqnwhscT7h0EcAvMa9r7gjRylqOH8Xhc4DA==", "license": "MIT", "dependencies": { - "balanced-match": "^1.0.0" + "@parcel/watcher": 
"^2.5.1", + "@tailwindcss/node": "4.2.1", + "@tailwindcss/oxide": "4.2.1", + "enhanced-resolve": "^5.19.0", + "mri": "^1.2.0", + "picocolors": "^1.1.1", + "tailwindcss": "4.2.1" + }, + "bin": { + "tailwindcss": "dist/index.mjs" } }, - "node_modules/braces": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", - "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "node_modules/@tailwindcss/node": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.2.1.tgz", + "integrity": "sha512-jlx6sLk4EOwO6hHe1oCGm1Q4AN/s0rSrTTPBGPM0/RQ6Uylwq17FuU8IeJJKEjtc6K6O07zsvP+gDO6MMWo7pg==", "license": "MIT", "dependencies": { - "fill-range": "^7.1.1" + "@jridgewell/remapping": "^2.3.5", + "enhanced-resolve": "^5.19.0", + "jiti": "^2.6.1", + "lightningcss": "1.31.1", + "magic-string": "^0.30.21", + "source-map-js": "^1.2.1", + "tailwindcss": "4.2.1" + } + }, + "node_modules/@tailwindcss/oxide": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.2.1.tgz", + "integrity": "sha512-yv9jeEFWnjKCI6/T3Oq50yQEOqmpmpfzG1hcZsAOaXFQPfzWprWrlHSdGPEF3WQTi8zu8ohC9Mh9J470nT5pUw==", + "license": "MIT", + "engines": { + "node": ">= 20" }, + "optionalDependencies": { + "@tailwindcss/oxide-android-arm64": "4.2.1", + "@tailwindcss/oxide-darwin-arm64": "4.2.1", + "@tailwindcss/oxide-darwin-x64": "4.2.1", + "@tailwindcss/oxide-freebsd-x64": "4.2.1", + "@tailwindcss/oxide-linux-arm-gnueabihf": "4.2.1", + "@tailwindcss/oxide-linux-arm64-gnu": "4.2.1", + "@tailwindcss/oxide-linux-arm64-musl": "4.2.1", + "@tailwindcss/oxide-linux-x64-gnu": "4.2.1", + "@tailwindcss/oxide-linux-x64-musl": "4.2.1", + "@tailwindcss/oxide-wasm32-wasi": "4.2.1", + "@tailwindcss/oxide-win32-arm64-msvc": "4.2.1", + "@tailwindcss/oxide-win32-x64-msvc": "4.2.1" + } + }, + "node_modules/@tailwindcss/oxide-android-arm64": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.2.1.tgz", + "integrity": "sha512-eZ7G1Zm5EC8OOKaesIKuw77jw++QJ2lL9N+dDpdQiAB/c/B2wDh0QPFHbkBVrXnwNugvrbJFk1gK2SsVjwWReg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], "engines": { - "node": ">=8" + "node": ">= 20" } }, - "node_modules/browserslist": { - "version": "4.24.2", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.2.tgz", - "integrity": "sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } + "node_modules/@tailwindcss/oxide-darwin-arm64": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.2.1.tgz", + "integrity": "sha512-q/LHkOstoJ7pI1J0q6djesLzRvQSIfEto148ppAd+BVQK0JYjQIFSK3JgYZJa+Yzi0DDa52ZsQx2rqytBnf8Hw==", + "cpu": [ + "arm64" ], "license": "MIT", - "dependencies": { - "caniuse-lite": "^1.0.30001669", - "electron-to-chromium": "^1.5.41", - "node-releases": "^2.0.18", - "update-browserslist-db": "^1.1.1" - }, - "bin": { - "browserslist": "cli.js" - }, + "optional": true, + "os": [ + "darwin" + ], "engines": { - "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + "node": 
">= 20" } }, - "node_modules/camelcase-css": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", - "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "node_modules/@tailwindcss/oxide-darwin-x64": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.2.1.tgz", + "integrity": "sha512-/f/ozlaXGY6QLbpvd/kFTro2l18f7dHKpB+ieXz+Cijl4Mt9AI2rTrpq7V+t04nK+j9XBQHnSMdeQRhbGyt6fw==", + "cpu": [ + "x64" + ], "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], "engines": { - "node": ">= 6" + "node": ">= 20" } }, - "node_modules/caniuse-lite": { - "version": "1.0.30001680", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001680.tgz", - "integrity": "sha512-rPQy70G6AGUMnbwS1z6Xg+RkHYPAi18ihs47GH0jcxIG7wArmPgY3XbS2sRdBbxJljp3thdT8BIqv9ccCypiPA==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/caniuse-lite" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } + "node_modules/@tailwindcss/oxide-freebsd-x64": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.2.1.tgz", + "integrity": "sha512-5e/AkgYJT/cpbkys/OU2Ei2jdETCLlifwm7ogMC7/hksI2fC3iiq6OcXwjibcIjPung0kRtR3TxEITkqgn0TcA==", + "cpu": [ + "x64" ], - "license": "CC-BY-4.0" - }, - "node_modules/chokidar": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", - "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", "license": "MIT", - "dependencies": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - }, + "optional": true, + "os": [ + "freebsd" + ], "engines": { - "node": ">= 8.10.0" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - }, - "optionalDependencies": { - "fsevents": "~2.3.2" + "node": ">= 20" } }, - "node_modules/cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "license": "ISC", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - }, + "node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.2.1.tgz", + "integrity": "sha512-Uny1EcVTTmerCKt/1ZuKTkb0x8ZaiuYucg2/kImO5A5Y/kBz41/+j0gxUZl+hTF3xkWpDmHX+TaWhOtba2Fyuw==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=12" + "node": ">= 20" } }, - "node_modules/cliui/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "node_modules/@tailwindcss/oxide-linux-arm64-gnu": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.2.1.tgz", + "integrity": 
"sha512-CTrwomI+c7n6aSSQlsPL0roRiNMDQ/YzMD9EjcR+H4f0I1SQ8QqIuPnsVp7QgMkC1Qi8rtkekLkOFjo7OlEFRQ==", + "cpu": [ + "arm64" + ], "license": "MIT", + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=8" + "node": ">= 20" } }, - "node_modules/cliui/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "node_modules/@tailwindcss/oxide-linux-arm64-musl": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.2.1.tgz", + "integrity": "sha512-WZA0CHRL/SP1TRbA5mp9htsppSEkWuQ4KsSUumYQnyl8ZdT39ntwqmz4IUHGN6p4XdSlYfJwM4rRzZLShHsGAQ==", + "cpu": [ + "arm64" + ], "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" + "node": ">= 20" } }, - "node_modules/cliui/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/cliui/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "node_modules/@tailwindcss/oxide-linux-x64-gnu": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.2.1.tgz", + "integrity": "sha512-qMFzxI2YlBOLW5PhblzuSWlWfwLHaneBE0xHzLrBgNtqN6mWfs+qYbhryGSXQjFYB1Dzf5w+LN5qbUTPhW7Y5g==", + "cpu": [ + "x64" + ], "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=8" + "node": ">= 20" } }, - "node_modules/cliui/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "node_modules/@tailwindcss/oxide-linux-x64-musl": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.2.1.tgz", + "integrity": "sha512-5r1X2FKnCMUPlXTWRYpHdPYUY6a1Ar/t7P24OuiEdEOmms5lyqjDRvVY1yy9Rmioh+AunQ0rWiOTPE8F9A3v5g==", + "cpu": [ + "x64" + ], "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=8" + "node": ">= 20" } }, - "node_modules/cliui/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "node_modules/@tailwindcss/oxide-wasm32-wasi": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.2.1.tgz", + "integrity": "sha512-MGFB5cVPvshR85MTJkEvqDUnuNoysrsRxd6vnk1Lf2tbiqNlXpHYZqkqOQalydienEWOHHFyyuTSYRsLfxFJ2Q==", + "bundleDependencies": [ + 
"@napi-rs/wasm-runtime", + "@emnapi/core", + "@emnapi/runtime", + "@tybys/wasm-util", + "@emnapi/wasi-threads", + "tslib" + ], + "cpu": [ + "wasm32" + ], "license": "MIT", + "optional": true, "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" + "@emnapi/core": "^1.8.1", + "@emnapi/runtime": "^1.8.1", + "@emnapi/wasi-threads": "^1.1.0", + "@napi-rs/wasm-runtime": "^1.1.1", + "@tybys/wasm-util": "^0.10.1", + "tslib": "^2.8.1" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + "node": ">=14.0.0" } }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.2.1.tgz", + "integrity": "sha512-YlUEHRHBGnCMh4Nj4GnqQyBtsshUPdiNroZj8VPkvTZSoHsilRCwXcVKnG9kyi0ZFAS/3u+qKHBdDc81SADTRA==", + "cpu": [ + "arm64" + ], "license": "MIT", - "dependencies": { - "color-name": "~1.1.4" - }, + "optional": true, + "os": [ + "win32" + ], "engines": { - "node": ">=7.0.0" + "node": ">= 20" } }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "license": "MIT" - }, - "node_modules/commander": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", - "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "node_modules/@tailwindcss/oxide-win32-x64-msvc": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.2.1.tgz", + "integrity": "sha512-rbO34G5sMWWyrN/idLeVxAZgAKWrn5LiR3/I90Q9MkA67s6T1oB0xtTe+0heoBvHSpbU9Mk7i6uwJnpo4u21XQ==", + "cpu": [ + "x64" + ], "license": "MIT", + "optional": true, + "os": [ + "win32" + ], "engines": { - "node": ">= 6" + "node": ">= 20" } }, - "node_modules/cross-spawn": { - "version": "7.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.5.tgz", - "integrity": "sha512-ZVJrKKYunU38/76t0RMOulHOnUcbU9GbpWKAOZ0mhjr7CX6FVrH+4FrAapSOekrgFQ3f/8gwMEuIft0aKq6Hug==", + "node_modules/@tailwindcss/typography": { + "version": "0.5.19", + "resolved": "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.5.19.tgz", + "integrity": "sha512-w31dd8HOx3k9vPtcQh5QHP9GwKcgbMp87j58qi6xgiBnFFtKEAgCWnDw4qUT8aHwkCp8bKvb/KGKWWHedP0AAg==", "license": "MIT", "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", - "license": "MIT", - "bin": { - "cssesc": "bin/cssesc" + "postcss-selector-parser": "6.0.10" }, - "engines": { - "node": ">=4" + "peerDependencies": { + "tailwindcss": ">=3.0.0 || insiders || >=4.0.0-alpha.20 || >=4.0.0-beta.1" } }, - "node_modules/dependency-graph": { - "version": "0.11.0", - "resolved": 
"https://registry.npmjs.org/dependency-graph/-/dependency-graph-0.11.0.tgz", - "integrity": "sha512-JeMq7fEshyepOWDfcfHK06N3MhyPhz++vtqWhMT5O9A3K42rdsEDpfdVqjaqaAhsw6a+ZqeDvQVtD0hFHQWrzg==", + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "dev": true, "license": "MIT", - "engines": { - "node": ">= 0.6.0" + "dependencies": { + "@types/ms": "*" } }, - "node_modules/didyoumean": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", - "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", - "license": "Apache-2.0" + "node_modules/@types/katex": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@types/katex/-/katex-0.16.7.tgz", + "integrity": "sha512-HMwFiRujE5PjrgwHQ25+bsLJgowjGjm5Z8FVSf0N6PwgJrwxH0QxzHYDcKsTfV3wva0vzrpqMTJS2jXPr5BMEQ==", + "dev": true, + "license": "MIT" }, - "node_modules/dlv": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", - "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + "dev": true, "license": "MIT" }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "dev": true, "license": "MIT" }, - "node_modules/electron-to-chromium": { - "version": "1.5.62", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.62.tgz", - "integrity": "sha512-t8c+zLmJHa9dJy96yBZRXGQYoiCEnHYgFwn1asvSPZSUdVxnB62A4RASd7k41ytG3ErFBA0TpHlKg9D9SQBmLg==", - "license": "ISC" + "node_modules/@vue/reactivity": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.1.5.tgz", + "integrity": "sha512-1tdfLmNjWG6t/CsPldh+foumYFo3cpyCHgBYQ34ylaMsJ+SNHQ1kApMIa8jN+i593zQuaw3AdWH0nJTARzCFhg==", + "license": "MIT", + "dependencies": { + "@vue/shared": "3.1.5" + } }, - "node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "node_modules/@vue/shared": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.1.5.tgz", + "integrity": "sha512-oJ4F3TnvpXaQwZJNF3ZK+kLPHKarDmJjJ6jyzVNDKH9md1dptjC7lWR//jrGuLdek/U6iltWxqAnYOu8gCiOvA==", "license": "MIT" }, - "node_modules/entities": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", - "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", - "dev": true, - "license": "BSD-2-Clause", - 
"engines": { - "node": ">=0.12" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" + "node_modules/alpinejs": { + "version": "3.15.8", + "resolved": "https://registry.npmjs.org/alpinejs/-/alpinejs-3.15.8.tgz", + "integrity": "sha512-zxIfCRTBGvF1CCLIOMQOxAyBuqibxSEwS6Jm1a3HGA9rgrJVcjEWlwLcQTVGAWGS8YhAsTRLVrtQ5a5QT9bSSQ==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "~3.1.1" } }, - "node_modules/escalade": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", - "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, "license": "MIT", "engines": { - "node": ">=6" + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" } }, - "node_modules/fast-glob": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", - "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "license": "MIT", "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.4" + "fill-range": "^7.1.1" }, "engines": { - "node": ">=8.6.0" + "node": ">=8" } }, - "node_modules/fastq": { - "version": "1.17.1", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", - "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", - "license": "ISC", - "dependencies": { - "reusify": "^1.0.4" + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/fill-range": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", - "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "dev": true, "license": "MIT", - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/focus-trap": { - "version": "6.9.4", - "resolved": "https://registry.npmjs.org/focus-trap/-/focus-trap-6.9.4.tgz", - "integrity": "sha512-v2NTsZe2FF59Y+sDykKY+XjqZ0cPfhq/hikWVL88BqLivnNiEffAsac6rP6H45ff9wG9LL5ToiDqrLEP9GX9mw==", + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "dev": true, "license": "MIT", - "dependencies": { - "tabbable": "^5.3.3" + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/foreground-child": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.0.tgz", - "integrity": "sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==", - "license": "ISC", - "dependencies": { - "cross-spawn": "^7.0.0", - "signal-exit": "^4.0.1" - }, + "node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "dev": true, + "license": "MIT", "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "node": ">= 12" } }, - "node_modules/fraction.js": { - "version": "4.3.7", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", - "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", "license": "MIT", - "engines": { - "node": "*" + "bin": { + "cssesc": "bin/cssesc" }, - "funding": { - "type": "patreon", - "url": "https://github.com/sponsors/rawify" + "engines": { + "node": ">=4" } }, - "node_modules/fs-extra": { - "version": "11.2.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz", - "integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==", + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, "license": "MIT", "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" + "ms": "^2.1.3" }, "engines": { - "node": ">=14.14" + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } } }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "hasInstallScript": true, + "node_modules/decode-named-character-reference": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz", + "integrity": "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==", + "dev": true, "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/function-bind": { - "version": "1.1.2", - "resolved": 
"https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "dev": true, "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" + "engines": { + "node": ">=6" } }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "license": "ISC", + "node_modules/detect-libc": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", + "integrity": "sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==", + "license": "Apache-2.0", + "bin": { + "detect-libc": "bin/detect-libc.js" + }, "engines": { - "node": "6.* || 8.* || >= 10.*" + "node": ">=0.10" } }, - "node_modules/get-stdin": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-9.0.0.tgz", - "integrity": "sha512-dVKBjfWisLAicarI2Sf+JuBE/DghV4UzNAVe9yhEJuzeREd3JhOTE9cUaJTeSa77fsbQUK3pcOpJfM59+VKZaA==", + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "dev": true, "license": "MIT", - "engines": { - "node": ">=12" + "dependencies": { + "dequal": "^2.0.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", - "license": "ISC", + "node_modules/enhanced-resolve": { + "version": "5.20.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.20.1.tgz", + "integrity": "sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA==", + "license": "MIT", "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" + "graceful-fs": "^4.2.4", + "tapable": "^2.3.0" }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "engines": { + "node": ">=10.13.0" } }, - "node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "license": "ISC", + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", "dependencies": { - "is-glob": "^4.0.1" + "to-regex-range": "^5.0.1" }, "engines": { - "node": ">= 6" + "node": ">=8" } }, - "node_modules/globby": { - 
"version": "14.0.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-14.0.2.tgz", - "integrity": "sha512-s3Fq41ZVh7vbbe2PN3nrW7yC7U7MFVc5c98/iTl9c2GawNMKx/J648KQRW6WKkuU8GIbbh2IXfIRQjOZnXcTnw==", + "node_modules/focus-trap": { + "version": "6.9.4", + "resolved": "https://registry.npmjs.org/focus-trap/-/focus-trap-6.9.4.tgz", + "integrity": "sha512-v2NTsZe2FF59Y+sDykKY+XjqZ0cPfhq/hikWVL88BqLivnNiEffAsac6rP6H45ff9wG9LL5ToiDqrLEP9GX9mw==", "license": "MIT", "dependencies": { - "@sindresorhus/merge-streams": "^2.1.0", - "fast-glob": "^3.3.2", - "ignore": "^5.2.4", - "path-type": "^5.0.0", - "slash": "^5.1.0", - "unicorn-magic": "^0.1.0" - }, + "tabbable": "^5.3.3" + } + }, + "node_modules/get-east-asian-width": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.5.0.tgz", + "integrity": "sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA==", + "dev": true, + "license": "MIT", "engines": { "node": ">=18" }, @@ -877,52 +950,50 @@ "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", "license": "ISC" }, - "node_modules/hasown": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", - "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.2" - }, + "node_modules/highlight.js": { + "version": "11.11.1", + "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-11.11.1.tgz", + "integrity": "sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w==", + "license": "BSD-3-Clause", "engines": { - "node": ">= 0.4" + "node": ">=12.0.0" } }, - "node_modules/ignore": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", - "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "dev": true, "license": "MIT", - "engines": { - "node": ">= 4" + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "dev": true, "license": "MIT", "dependencies": { - "binary-extensions": "^2.0.0" + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" }, - "engines": { - "node": ">=8" + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/is-core-module": { - "version": "2.15.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.15.1.tgz", - "integrity": "sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==", + "node_modules/is-decimal": { + 
"version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "dev": true, "license": "MIT", - "dependencies": { - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - }, "funding": { - "url": "https://github.com/sponsors/ljharb" + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, "node_modules/is-extglob": { @@ -934,15 +1005,6 @@ "node": ">=0.10.0" } }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -955,6 +1017,17 @@ "node": ">=0.10.0" } }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/is-number": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", @@ -964,532 +1037,942 @@ "node": ">=0.12.0" } }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "license": "ISC" - }, - "node_modules/jackspeak": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", - "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/cliui": "^8.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - }, - "optionalDependencies": { - "@pkgjs/parseargs": "^0.11.0" - } - }, "node_modules/jiti": { - "version": "1.21.6", - "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.6.tgz", - "integrity": "sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==", + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz", + "integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==", "license": "MIT", "bin": { - "jiti": "bin/jiti.js" + "jiti": "lib/jiti-cli.mjs" } }, - "node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "node_modules/katex": { + "version": "0.16.25", + "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.25.tgz", + "integrity": "sha512-woHRUZ/iF23GBP1dkDQMh1QBad9dmr8/PAwNA54VrSOVYgI12MAcE14TqnDdQOdzyEonGzMepYnqBMYdsoAr8Q==", + "dev": true, + "funding": [ + "https://opencollective.com/katex", + "https://github.com/sponsors/katex" + ], "license": "MIT", "dependencies": { - "universalify": "^2.0.0" + "commander": "^8.3.0" }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" + 
"bin": { + "katex": "cli.js" } }, - "node_modules/lilconfig": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", - "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", - "license": "MIT", + "node_modules/lightningcss": { + "version": "1.31.1", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.31.1.tgz", + "integrity": "sha512-l51N2r93WmGUye3WuFoN5k10zyvrVs0qfKBhyC5ogUQ6Ew6JUSswh78mbSO+IU3nTWsyOArqPCcShdQSadghBQ==", + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, "engines": { - "node": ">=14" + "node": ">= 12.0.0" }, "funding": { - "url": "https://github.com/sponsors/antonk52" - } - }, - "node_modules/lines-and-columns": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", - "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", - "license": "MIT" - }, - "node_modules/linkify-it": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-5.0.0.tgz", - "integrity": "sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "uc.micro": "^2.0.0" - } - }, - "node_modules/lodash.castarray": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/lodash.castarray/-/lodash.castarray-4.4.0.tgz", - "integrity": "sha512-aVx8ztPv7/2ULbArGJ2Y42bG1mEQ5mGjpdvrbJcJFU3TbYybe+QlLS4pst9zV52ymy2in1KpFPiZnAOATxD4+Q==", - "license": "MIT" - }, - "node_modules/lodash.isplainobject": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", - "integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==", - "license": "MIT" - }, - "node_modules/lodash.merge": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", - "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", - "license": "MIT" - }, - "node_modules/lru-cache": { - "version": "10.4.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", - "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", - "license": "ISC" - }, - "node_modules/markdown-it": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-14.1.0.tgz", - "integrity": "sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==", - "dev": true, - "license": "MIT", - "dependencies": { - "argparse": "^2.0.1", - "entities": "^4.4.0", - "linkify-it": "^5.0.0", - "mdurl": "^2.0.0", - "punycode.js": "^2.3.1", - "uc.micro": "^2.1.0" - }, - "bin": { - "markdown-it": "bin/markdown-it.mjs" - } - }, - "node_modules/markdownlint": { - "version": "0.35.0", - "resolved": "https://registry.npmjs.org/markdownlint/-/markdownlint-0.35.0.tgz", - "integrity": "sha512-wgp8yesWjFBL7bycA3hxwHRdsZGJhjhyP1dSxKVKrza0EPFYtn+mHtkVy6dvP1kGSjovyG5B8yNP6Frj0UFUJg==", - "dev": true, - "license": "MIT", - "dependencies": { - "markdown-it": "14.1.0", - "markdownlint-micromark": "0.1.10" + "type": "opencollective", + "url": "https://opencollective.com/parcel" }, + "optionalDependencies": { + "lightningcss-android-arm64": "1.31.1", + 
"lightningcss-darwin-arm64": "1.31.1", + "lightningcss-darwin-x64": "1.31.1", + "lightningcss-freebsd-x64": "1.31.1", + "lightningcss-linux-arm-gnueabihf": "1.31.1", + "lightningcss-linux-arm64-gnu": "1.31.1", + "lightningcss-linux-arm64-musl": "1.31.1", + "lightningcss-linux-x64-gnu": "1.31.1", + "lightningcss-linux-x64-musl": "1.31.1", + "lightningcss-win32-arm64-msvc": "1.31.1", + "lightningcss-win32-x64-msvc": "1.31.1" + } + }, + "node_modules/lightningcss-android-arm64": { + "version": "1.31.1", + "resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.31.1.tgz", + "integrity": "sha512-HXJF3x8w9nQ4jbXRiNppBCqeZPIAfUo8zE/kOEGbW5NZvGc/K7nMxbhIr+YlFlHW5mpbg/YFPdbnCh1wAXCKFg==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "android" + ], "engines": { - "node": ">=18" + "node": ">= 12.0.0" }, "funding": { - "url": "https://github.com/sponsors/DavidAnson" + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/markdownlint-micromark": { - "version": "0.1.10", - "resolved": "https://registry.npmjs.org/markdownlint-micromark/-/markdownlint-micromark-0.1.10.tgz", - "integrity": "sha512-no5ZfdqAdWGxftCLlySHSgddEjyW4kui4z7amQcGsSKfYC5v/ou+8mIQVyg9KQMeEZLNtz9OPDTj7nnTnoR4FQ==", - "dev": true, - "license": "MIT", + "node_modules/lightningcss-darwin-arm64": { + "version": "1.31.1", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.31.1.tgz", + "integrity": "sha512-02uTEqf3vIfNMq3h/z2cJfcOXnQ0GRwQrkmPafhueLb2h7mqEidiCzkE4gBMEH65abHRiQvhdcQ+aP0D0g67sg==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], "engines": { - "node": ">=18" + "node": ">= 12.0.0" }, "funding": { - "url": "https://github.com/sponsors/DavidAnson" + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/mdurl": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-2.0.0.tgz", - "integrity": "sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==", - "dev": true, - "license": "MIT" - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "license": "MIT", + "node_modules/lightningcss-darwin-x64": { + "version": "1.31.1", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.31.1.tgz", + "integrity": "sha512-1ObhyoCY+tGxtsz1lSx5NXCj3nirk0Y0kB/g8B8DT+sSx4G9djitg9ejFnjb3gJNWo7qXH4DIy2SUHvpoFwfTA==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], "engines": { - "node": ">= 8" - } - }, - "node_modules/micromatch": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", - "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", - "license": "MIT", - "dependencies": { - "braces": "^3.0.3", - "picomatch": "^2.3.1" + "node": ">= 12.0.0" }, - "engines": { - "node": ">=8.6" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": 
"sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.31.1", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.31.1.tgz", + "integrity": "sha512-1RINmQKAItO6ISxYgPwszQE1BrsVU5aB45ho6O42mu96UiZBxEXsuQ7cJW4zs4CEodPUioj/QrXW1r9pLUM74A==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], "engines": { - "node": ">=16 || 14 >=14.17" + "node": ">= 12.0.0" }, "funding": { - "url": "https://github.com/sponsors/isaacs" + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/minipass": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", - "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", - "license": "ISC", + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.31.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.31.1.tgz", + "integrity": "sha512-OOCm2//MZJ87CdDK62rZIu+aw9gBv4azMJuA8/KB74wmfS3lnC4yoPHm0uXZ/dvNNHmnZnB8XLAZzObeG0nS1g==", + "cpu": [ + "arm" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=16 || 14 >=14.17" + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/mz": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", - "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", - "license": "MIT", - "dependencies": { - "any-promise": "^1.0.0", - "object-assign": "^4.0.1", - "thenify-all": "^1.0.0" + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.31.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.31.1.tgz", + "integrity": "sha512-WKyLWztD71rTnou4xAD5kQT+982wvca7E6QoLpoawZ1gP9JM0GJj4Tp5jMUh9B3AitHbRZ2/H3W5xQmdEOUlLg==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/nanoid": { - "version": "3.3.7", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", - "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.31.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.31.1.tgz", + "integrity": "sha512-mVZ7Pg2zIbe3XlNbZJdjs86YViQFoJSpc41CbVmKBPiGmC4YrfeOyz65ms2qpAobVd7WQsbW4PdsSJEMymyIMg==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" ], - "license": "MIT", - "bin": { - "nanoid": "bin/nanoid.cjs" - }, "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/node-releases": { - "version": "2.0.18", - "resolved": 
"https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", - "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==", - "license": "MIT" - }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "license": "MIT", + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.31.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.31.1.tgz", + "integrity": "sha512-xGlFWRMl+0KvUhgySdIaReQdB4FNudfUTARn7q0hh/V67PVGCs3ADFjw+6++kG1RNd0zdGRlEKa+T13/tQjPMA==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=0.10.0" + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/normalize-range": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", - "license": "MIT", + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.31.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.31.1.tgz", + "integrity": "sha512-eowF8PrKHw9LpoZii5tdZwnBcYDxRw2rRCyvAXLi34iyeYfqCQNA9rmUM0ce62NlPhCvof1+9ivRaTY6pSKDaA==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=0.10.0" + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", - "license": "MIT", + "node_modules/lightningcss-win32-arm64-msvc": { + "version": "1.31.1", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.31.1.tgz", + "integrity": "sha512-aJReEbSEQzx1uBlQizAOBSjcmr9dCdL3XuC/6HLXAxmtErsj2ICo5yYggg1qOODQMtnjNQv2UHb9NpOuFtYe4w==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], "engines": { - "node": ">=0.10.0" + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/object-hash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", - "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", - "license": "MIT", + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.31.1", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.31.1.tgz", + "integrity": "sha512-I9aiFrbd7oYHwlnQDqr1Roz+fTz61oDDJX7n9tYF9FJymH1cIN1DtKw3iYt6b8WZgEjoNwVSncwF4wx/ZedMhw==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], "engines": { - "node": ">= 6" + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" } }, - "node_modules/package-json-from-dist": { - "version": "1.0.1", 
- "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", - "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", - "license": "BlueOak-1.0.0" - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "license": "MIT", + "node_modules/lightningcss/node_modules/detect-libc": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", + "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + "license": "Apache-2.0", "engines": { "node": ">=8" } }, - "node_modules/path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "license": "MIT" - }, - "node_modules/path-scurry": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", - "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", - "license": "BlueOak-1.0.0", + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "license": "MIT", "dependencies": { - "lru-cache": "^10.2.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - }, - "engines": { - "node": ">=16 || 14 >=14.18" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "@jridgewell/sourcemap-codec": "^1.5.5" } }, - "node_modules/path-type": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-5.0.0.tgz", - "integrity": "sha512-5HviZNaZcfqP95rwpv+1HDgUamezbqdSYTyzjTvwtJSnIH+3vnbmWsItli8OFEndS984VT55M3jduxZbX351gg==", + "node_modules/markdownlint": { + "version": "0.40.0", + "resolved": "https://registry.npmjs.org/markdownlint/-/markdownlint-0.40.0.tgz", + "integrity": "sha512-UKybllYNheWac61Ia7T6fzuQNDZimFIpCg2w6hHjgV1Qu0w1TV0LlSgryUGzM0bkKQCBhy2FDhEELB73Kb0kAg==", + "dev": true, "license": "MIT", - "engines": { - "node": ">=12" + "dependencies": { + "micromark": "4.0.2", + "micromark-core-commonmark": "2.0.3", + "micromark-extension-directive": "4.0.0", + "micromark-extension-gfm-autolink-literal": "2.1.0", + "micromark-extension-gfm-footnote": "2.1.0", + "micromark-extension-gfm-table": "2.1.1", + "micromark-extension-math": "3.1.0", + "micromark-util-types": "2.0.2", + "string-width": "8.1.0" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/picocolors": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", - "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", - "license": "ISC" - }, - "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "license": "MIT", "engines": { - "node": ">=8.6" + "node": ">=20" }, "funding": { - "url": 
"https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" + "url": "https://github.com/sponsors/DavidAnson" } }, - "node_modules/pirates": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", - "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "node_modules/marked": { + "version": "17.0.4", + "resolved": "https://registry.npmjs.org/marked/-/marked-17.0.4.tgz", + "integrity": "sha512-NOmVMM+KAokHMvjWmC5N/ZOvgmSWuqJB8FoYI019j4ogb/PeRMKoKIjReZ2w3376kkA8dSJIP8uD993Kxc0iRQ==", "license": "MIT", + "bin": { + "marked": "bin/marked.js" + }, "engines": { - "node": ">= 6" + "node": ">= 20" } }, - "node_modules/postcss": { - "version": "8.4.49", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.49.tgz", - "integrity": "sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA==", + "node_modules/micromark": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "dev": true, "funding": [ { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" }, { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/postcss" + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" }, { - "type": "github", - "url": "https://github.com/sponsors/ai" + "type": "OpenCollective", + "url": "https://opencollective.com/unified" } ], "license": "MIT", "dependencies": { - "nanoid": "^3.3.7", - "picocolors": "^1.1.1", - "source-map-js": "^1.2.1" - }, - "engines": { - "node": "^10 || ^12 || >=14" - } - }, - "node_modules/postcss-cli": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/postcss-cli/-/postcss-cli-11.0.0.tgz", - "integrity": "sha512-xMITAI7M0u1yolVcXJ9XTZiO9aO49mcoKQy6pCDFdMh9kGqhzLVpWxeD/32M/QBmkhcGypZFFOLNLmIW4Pg4RA==", + "decode-named-character-reference": "^1.0.0", 
+ "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-4.0.0.tgz", + "integrity": "sha512-/C2nqVmXXmiseSSuCdItCMho7ybwwop6RrrRPk0KbOHW21JKoCldC+8rFOaundDoRBUWBnJJcxeA/Kvi34WQXg==", + "dev": true, "license": "MIT", "dependencies": { - "chokidar": "^3.3.0", - "dependency-graph": "^0.11.0", - "fs-extra": "^11.0.0", - "get-stdin": "^9.0.0", - "globby": "^14.0.0", - "picocolors": "^1.0.0", - "postcss-load-config": "^5.0.0", - "postcss-reporter": "^7.0.0", - "pretty-hrtime": "^1.0.3", - "read-cache": "^1.0.0", - "slash": "^5.0.0", - "yargs": "^17.0.0" - }, - "bin": { - "postcss": "index.js" - }, - "engines": { - "node": ">=18" + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "parse-entities": "^4.0.0" }, - "peerDependencies": { - "postcss": "^8.0.0" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/postcss-import": { - "version": "16.1.0", - "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-16.1.0.tgz", - "integrity": "sha512-7hsAZ4xGXl4MW+OKEWCnF6T5jqBw80/EE9aXg1r2yyn1RsVEU8EtKXbijEODa+rg7iih4bKf7vlvTGYR4CnPNg==", + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "dev": true, "license": "MIT", "dependencies": { - "postcss-value-parser": "^4.0.0", - "read-cache": "^1.0.0", - "resolve": "^1.1.7" - }, - "engines": { - "node": ">=18.0.0" + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" }, - "peerDependencies": { - "postcss": "^8.0.0" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/postcss-js": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz", - "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==", + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "dev": true, "license": "MIT", "dependencies": { - "camelcase-css": "^2.0.1" - }, - "engines": { - "node": "^12 || ^14 || >= 16" + "devlop": "^1.0.0", + 
"micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" }, "funding": { "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - "peerDependencies": { - "postcss": "^8.4.21" + "url": "https://opencollective.com/unified" } }, - "node_modules/postcss-load-config": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-5.1.0.tgz", - "integrity": "sha512-G5AJ+IX0aD0dygOE0yFZQ/huFFMSNneyfp0e3/bT05a8OfPC5FUoZRPfGijUdGOJNMewJiwzcHJXFafFzeKFVA==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "dev": true, "license": "MIT", "dependencies": { - "lilconfig": "^3.1.1", - "yaml": "^2.4.2" + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" }, - "engines": { - "node": ">= 18" - }, - "peerDependencies": { - "jiti": ">=1.21.0", - "postcss": ">=8.0.9", - "tsx": "^4.8.1" - }, - "peerDependenciesMeta": { - "jiti": { - "optional": true - }, - "postcss": { - "optional": true - }, - "tsx": { - "optional": true - } + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/postcss-nested": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-5.0.6.tgz", - "integrity": "sha512-rKqm2Fk0KbA8Vt3AdGN0FB9OBOMDVajMG6ZCf/GoHgdxUJ4sBFp0A/uMIRm+MJUdo33YXEtjqIz8u7DAp8B7DA==", + "node_modules/micromark-extension-math": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-math/-/micromark-extension-math-3.1.0.tgz", + "integrity": "sha512-lvEqd+fHjATVs+2v/8kg9i5Q0AP2k85H0WUOwpIVvUML8BapsMvh1XAogmQjOCsLpoKRCVQqEkQBB3NhVBcsOg==", + "dev": true, "license": "MIT", "dependencies": { - "postcss-selector-parser": "^6.0.6" - }, - "engines": { - "node": ">=12.0" + "@types/katex": "^0.16.0", + "devlop": "^1.0.0", + "katex": "^0.16.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" }, "funding": { "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - "peerDependencies": { - "postcss": "^8.2.14" + "url": "https://opencollective.com/unified" } }, - "node_modules/postcss-reporter": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/postcss-reporter/-/postcss-reporter-7.1.0.tgz", - "integrity": "sha512-/eoEylGWyy6/DOiMP5lmFRdmDKThqgn7D6hP2dXKJI/0rJSO1ADFNngZfDzxL0YAxFvws+Rtpuji1YIHj4mySA==", + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "dev": true, "funding": [ { - "type": 
"opencollective", - "url": "https://opencollective.com/postcss/" + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" }, { - "type": "github", - "url": "https://github.com/sponsors/ai" + "type": "OpenCollective", + "url": "https://opencollective.com/unified" } ], "license": "MIT", "dependencies": { - "picocolors": "^1.0.0", - "thenby": "^1.3.4" - }, - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "postcss": "^8.1.0" + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "dev": 
true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + 
"integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": 
"sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mri": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz", + "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-addon-api": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz", + "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==", + "license": "MIT" + }, + "node_modules/parse-entities": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz", + "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" } }, "node_modules/postcss-selector-parser": { @@ -1505,16 +1988,10 @@ "node": ">=4" } }, - "node_modules/postcss-value-parser": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", - "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", - "license": "MIT" - }, "node_modules/prettier": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz", - "integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==", + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.8.1.tgz", + 
"integrity": "sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg==", "dev": true, "license": "MIT", "bin": { @@ -1544,37 +2021,43 @@ } }, "node_modules/prettier-plugin-tailwindcss": { - "version": "0.6.8", - "resolved": "https://registry.npmjs.org/prettier-plugin-tailwindcss/-/prettier-plugin-tailwindcss-0.6.8.tgz", - "integrity": "sha512-dGu3kdm7SXPkiW4nzeWKCl3uoImdd5CTZEJGxyypEPL37Wj0HT2pLqjrvSei1nTeuQfO4PUfjeW5cTUNRLZ4sA==", + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/prettier-plugin-tailwindcss/-/prettier-plugin-tailwindcss-0.7.2.tgz", + "integrity": "sha512-LkphyK3Fw+q2HdMOoiEHWf93fNtYJwfamoKPl7UwtjFQdei/iIBoX11G6j706FzN3ymX9mPVi97qIY8328vdnA==", "dev": true, "license": "MIT", "engines": { - "node": ">=14.21.3" + "node": ">=20.19" }, "peerDependencies": { "@ianvs/prettier-plugin-sort-imports": "*", + "@prettier/plugin-hermes": "*", + "@prettier/plugin-oxc": "*", "@prettier/plugin-pug": "*", "@shopify/prettier-plugin-liquid": "*", "@trivago/prettier-plugin-sort-imports": "*", - "@zackad/prettier-plugin-twig-melody": "*", + "@zackad/prettier-plugin-twig": "*", "prettier": "^3.0", "prettier-plugin-astro": "*", "prettier-plugin-css-order": "*", - "prettier-plugin-import-sort": "*", "prettier-plugin-jsdoc": "*", "prettier-plugin-marko": "*", "prettier-plugin-multiline-arrays": "*", "prettier-plugin-organize-attributes": "*", "prettier-plugin-organize-imports": "*", "prettier-plugin-sort-imports": "*", - "prettier-plugin-style-order": "*", "prettier-plugin-svelte": "*" }, "peerDependenciesMeta": { "@ianvs/prettier-plugin-sort-imports": { "optional": true }, + "@prettier/plugin-hermes": { + "optional": true + }, + "@prettier/plugin-oxc": { + "optional": true + }, "@prettier/plugin-pug": { "optional": true }, @@ -1584,7 +2067,7 @@ "@trivago/prettier-plugin-sort-imports": { "optional": true }, - "@zackad/prettier-plugin-twig-melody": { + "@zackad/prettier-plugin-twig": { "optional": true }, "prettier-plugin-astro": { @@ -1593,9 +2076,6 @@ "prettier-plugin-css-order": { "optional": true }, - "prettier-plugin-import-sort": { - "optional": true - }, "prettier-plugin-jsdoc": { "optional": true }, @@ -1614,176 +2094,9 @@ "prettier-plugin-sort-imports": { "optional": true }, - "prettier-plugin-style-order": { + "prettier-plugin-svelte": { "optional": true - }, - "prettier-plugin-svelte": { - "optional": true - } - } - }, - "node_modules/pretty-hrtime": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/pretty-hrtime/-/pretty-hrtime-1.0.3.tgz", - "integrity": "sha512-66hKPCr+72mlfiSjlEB1+45IjXSqvVAIy6mocupoww4tBFE9R9IhwwUGoI4G++Tc9Aq+2rxOt0RFU6gPcrte0A==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/punycode.js": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/punycode.js/-/punycode.js-2.3.1.tgz", - "integrity": "sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - 
"license": "MIT" - }, - "node_modules/read-cache": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", - "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", - "license": "MIT", - "dependencies": { - "pify": "^2.3.0" - } - }, - "node_modules/readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "license": "MIT", - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/resolve": { - "version": "1.22.8", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", - "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", - "license": "MIT", - "dependencies": { - "is-core-module": "^2.13.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", - "license": "MIT", - "engines": { - "iojs": ">=1.0.0", - "node": ">=0.10.0" - } - }, - "node_modules/run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" } - ], - "license": "MIT", - "dependencies": { - "queue-microtask": "^1.2.2" - } - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "license": "MIT", - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", - "license": "ISC", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/slash": { - "version": "5.1.0", - "resolved": 
"https://registry.npmjs.org/slash/-/slash-5.1.0.tgz", - "integrity": "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==", - "license": "MIT", - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/source-map-js": { @@ -1796,71 +2109,30 @@ } }, "node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-8.1.0.tgz", + "integrity": "sha512-Kxl3KJGb/gxkaUMOjRsQ8IrXiGW75O4E3RPjFIINOVH8AMl2SQ/yWdTzWwF3FevIX9LcMAjJW+GRwAlAbTSXdg==", + "dev": true, "license": "MIT", "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" + "get-east-asian-width": "^1.3.0", + "strip-ansi": "^7.1.0" }, "engines": { - "node": ">=12" + "node": ">=20" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/string-width-cjs": { - "name": "string-width", - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width-cjs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/string-width-cjs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.2.0.tgz", + "integrity": "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==", + "dev": true, "license": "MIT", "dependencies": { - "ansi-regex": "^6.0.1" + "ansi-regex": "^6.2.2" }, "engines": { "node": ">=12" @@ -1869,62 +2141,6 @@ "url": "https://github.com/chalk/strip-ansi?sponsor=1" } }, - "node_modules/strip-ansi-cjs": { - "name": "strip-ansi", - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": 
"sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/sucrase": { - "version": "3.35.0", - "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", - "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==", - "license": "MIT", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.2", - "commander": "^4.0.0", - "glob": "^10.3.10", - "lines-and-columns": "^1.1.6", - "mz": "^2.7.0", - "pirates": "^4.0.1", - "ts-interface-checker": "^0.1.9" - }, - "bin": { - "sucrase": "bin/sucrase", - "sucrase-node": "bin/sucrase-node" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - } - }, - "node_modules/supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/tabbable": { "version": "5.3.3", "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-5.3.3.tgz", @@ -1932,190 +2148,22 @@ "license": "MIT" }, "node_modules/tailwindcss": { - "version": "3.4.15", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.15.tgz", - "integrity": "sha512-r4MeXnfBmSOuKUWmXe6h2CcyfzJCEk4F0pptO5jlnYSIViUkVmsawj80N5h2lO3gwcmSb4n3PuN+e+GC1Guylw==", - "license": "MIT", - "dependencies": { - "@alloc/quick-lru": "^5.2.0", - "arg": "^5.0.2", - "chokidar": "^3.6.0", - "didyoumean": "^1.2.2", - "dlv": "^1.1.3", - "fast-glob": "^3.3.2", - "glob-parent": "^6.0.2", - "is-glob": "^4.0.3", - "jiti": "^1.21.6", - "lilconfig": "^2.1.0", - "micromatch": "^4.0.8", - "normalize-path": "^3.0.0", - "object-hash": "^3.0.0", - "picocolors": "^1.1.1", - "postcss": "^8.4.47", - "postcss-import": "^15.1.0", - "postcss-js": "^4.0.1", - "postcss-load-config": "^4.0.2", - "postcss-nested": "^6.2.0", - "postcss-selector-parser": "^6.1.2", - "resolve": "^1.22.8", - "sucrase": "^3.35.0" - }, - "bin": { - "tailwind": "lib/cli.js", - "tailwindcss": "lib/cli.js" - }, - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/tailwindcss/node_modules/glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.3" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/tailwindcss/node_modules/lilconfig": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", - "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/tailwindcss/node_modules/postcss-import": { - "version": "15.1.0", - 
"resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", - "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.0.0", - "read-cache": "^1.0.0", - "resolve": "^1.1.7" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "postcss": "^8.0.0" - } - }, - "node_modules/tailwindcss/node_modules/postcss-load-config": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.2.tgz", - "integrity": "sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "lilconfig": "^3.0.0", - "yaml": "^2.3.4" - }, - "engines": { - "node": ">= 14" - }, - "peerDependencies": { - "postcss": ">=8.0.9", - "ts-node": ">=9.0.0" - }, - "peerDependenciesMeta": { - "postcss": { - "optional": true - }, - "ts-node": { - "optional": true - } - } + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.2.1.tgz", + "integrity": "sha512-/tBrSQ36vCleJkAOsy9kbNTgaxvGbyOamC30PRePTQe/o1MFwEKHQk4Cn7BNGaPtjp+PuUrByJehM1hgxfq4sw==", + "license": "MIT" }, - "node_modules/tailwindcss/node_modules/postcss-load-config/node_modules/lilconfig": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", - "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", + "node_modules/tapable": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz", + "integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==", "license": "MIT", "engines": { - "node": ">=14" + "node": ">=6" }, "funding": { - "url": "https://github.com/sponsors/antonk52" - } - }, - "node_modules/tailwindcss/node_modules/postcss-nested": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", - "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.1.1" - }, - "engines": { - "node": ">=12.0" - }, - "peerDependencies": { - "postcss": "^8.2.14" - } - }, - "node_modules/tailwindcss/node_modules/postcss-selector-parser": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", - "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/thenby": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/thenby/-/thenby-1.3.4.tgz", - "integrity": "sha512-89Gi5raiWA3QZ4b2ePcEwswC3me9JIg+ToSgtE0JWeCynLnLxNr/f9G+xfo9K+Oj4AFdom8YNJjibIARTJmapQ==", - "license": "Apache-2.0" - }, - "node_modules/thenify": { - "version": "3.3.1", - "resolved": 
"https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", - "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", - "license": "MIT", - "dependencies": { - "any-promise": "^1.0.0" - } - }, - "node_modules/thenify-all": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", - "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", - "license": "MIT", - "dependencies": { - "thenify": ">= 3.1.0 < 4" - }, - "engines": { - "node": ">=0.8" + "type": "opencollective", + "url": "https://opencollective.com/webpack" } }, "node_modules/to-regex-range": { @@ -2130,19 +2178,6 @@ "node": ">=8.0" } }, - "node_modules/ts-interface-checker": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", - "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", - "license": "Apache-2.0" - }, - "node_modules/uc.micro": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-2.1.0.tgz", - "integrity": "sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==", - "dev": true, - "license": "MIT" - }, "node_modules/ulid": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/ulid/-/ulid-2.3.0.tgz", @@ -2153,257 +2188,11 @@ "ulid": "bin/cli.js" } }, - "node_modules/unicorn-magic": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.1.0.tgz", - "integrity": "sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==", - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/update-browserslist-db": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.1.tgz", - "integrity": "sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "escalade": "^3.2.0", - "picocolors": "^1.1.0" - }, - "bin": { - "update-browserslist-db": "cli.js" - }, - "peerDependencies": { - "browserslist": ">= 4.21.0" - } - }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", "license": "MIT" - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - 
"node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi-cjs": { - "name": "wrap-ansi", - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/wrap-ansi-cjs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "node_modules/yaml": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.6.0.tgz", - "integrity": "sha512-a6ae//JvKDEra2kdi1qzCyrJW/WZCgFi8ydDV+eXExl95t+5R+ijnqHJbz9tmMh8FUjx3iv2fCQ4dclAQlO2UQ==", - "license": "ISC", - "bin": { - 
"yaml": "bin.mjs" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/yargs": { - "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", - "license": "MIT", - "dependencies": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs-parser": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/yargs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } } } } diff --git a/package.json b/package.json index ad4c4627b52..4a7eae74b61 100644 --- a/package.json +++ b/package.json @@ -14,24 +14,22 @@ }, "homepage": "https://docs.docker.com/", "dependencies": { - "@alpinejs/collapse": "^3.14.3", - "@alpinejs/focus": "^3.14.3", - "@alpinejs/persist": "^3.14.3", - "@floating-ui/dom": "^1.6.12", - "@material-symbols/svg-400": "^0.23.0", - "@tailwindcss/nesting": "^0.0.0-insiders.565cd3e", - "@tailwindcss/typography": "^0.5.15", - "alpinejs": "^3.14.3", - "autoprefixer": "^10.4.20", - "postcss": "^8.4.49", - "postcss-cli": "^11.0.0", - "postcss-import": "^16.1.0", - "tailwindcss": "^3.4.15" + "@alpinejs/collapse": "3.15.8", + "@alpinejs/focus": "3.15.8", + "@alpinejs/persist": "3.15.8", + "@floating-ui/dom": "1.7.6", + "@material-symbols/svg-400": "0.40.2", + "@tailwindcss/cli": "4.2.1", + "@tailwindcss/typography": "0.5.19", + "alpinejs": "3.15.8", + "highlight.js": "11.11.1", + "marked": "17.0.4", + "tailwindcss": "4.2.1" }, "devDependencies": { - "markdownlint": "^0.35.0", - "prettier": "^3.3.3", - "prettier-plugin-go-template": "^0.0.15", - "prettier-plugin-tailwindcss": "^0.6.8" + "markdownlint": "0.40.0", + "prettier": "3.8.1", + "prettier-plugin-go-template": "0.0.15", + 
"prettier-plugin-tailwindcss": "0.7.2" } } diff --git a/postcss.config.js b/postcss.config.js deleted file mode 100644 index c50d4a38bce..00000000000 --- a/postcss.config.js +++ /dev/null @@ -1,8 +0,0 @@ -module.exports = { - plugins: { - "postcss-import": {}, - "tailwindcss/nesting": {}, - tailwindcss: {}, - ...(process.env.NODE_ENV === "production" ? { autoprefixer: {} } : {}), - }, -}; diff --git a/static/.well-known/mcp/server-card.json b/static/.well-known/mcp/server-card.json new file mode 100644 index 00000000000..6b21ff42104 --- /dev/null +++ b/static/.well-known/mcp/server-card.json @@ -0,0 +1,23 @@ +{ + "serverInfo": { + "name": "llms-txt", + "version": "1.26.0", + "description": "Official Docker documentation MCP server. Exposes a fetch_docker_docs tool that returns the docs.docker.com content index." + }, + "transport": { + "type": "http", + "url": "https://mcp-docs.docker.com/mcp" + }, + "capabilities": { + "prompts": { + "listChanged": false + }, + "resources": { + "subscribe": false, + "listChanged": false + }, + "tools": { + "listChanged": false + } + } +} diff --git a/static/apple-touch-icon.png b/static/apple-touch-icon.png new file mode 100644 index 00000000000..e25989d1514 Binary files /dev/null and b/static/apple-touch-icon.png differ diff --git a/static/assets/images/app-wf-dark-1.svg b/static/assets/images/app-wf-dark-1.svg deleted file mode 100644 index d7fa9b3f240..00000000000 --- a/static/assets/images/app-wf-dark-1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - diff --git a/static/assets/images/app-wf-dark-2.svg b/static/assets/images/app-wf-dark-2.svg deleted file mode 100644 index fb042506986..00000000000 --- a/static/assets/images/app-wf-dark-2.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - diff --git a/static/assets/images/app-wf-light-1.svg b/static/assets/images/app-wf-light-1.svg deleted file mode 100644 index 07c366653c0..00000000000 --- a/static/assets/images/app-wf-light-1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - diff --git a/static/assets/images/app-wf-light-2.svg b/static/assets/images/app-wf-light-2.svg deleted file mode 100644 index c4eb3adf48c..00000000000 --- a/static/assets/images/app-wf-light-2.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - diff --git a/static/assets/images/apple_48.svg b/static/assets/images/apple_48.svg deleted file mode 100644 index 49143ccbd66..00000000000 --- a/static/assets/images/apple_48.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - apple_48 - Created with Sketch. 
diff --git a/static/assets/images/app-wf-dark-1.svg b/static/assets/images/app-wf-dark-1.svg
deleted file mode 100644
index d7fa9b3f240..00000000000
--- a/static/assets/images/app-wf-dark-1.svg
+++ /dev/null
@@ -1,11 +0,0 @@
[11 lines of SVG markup stripped in extraction]
diff --git a/static/assets/images/app-wf-dark-2.svg b/static/assets/images/app-wf-dark-2.svg
deleted file mode 100644
index fb042506986..00000000000
--- a/static/assets/images/app-wf-dark-2.svg
+++ /dev/null
@@ -1,9 +0,0 @@
[9 lines of SVG markup stripped in extraction]
diff --git a/static/assets/images/app-wf-light-1.svg b/static/assets/images/app-wf-light-1.svg
deleted file mode 100644
index 07c366653c0..00000000000
--- a/static/assets/images/app-wf-light-1.svg
+++ /dev/null
@@ -1,11 +0,0 @@
[11 lines of SVG markup stripped in extraction]
diff --git a/static/assets/images/app-wf-light-2.svg b/static/assets/images/app-wf-light-2.svg
deleted file mode 100644
index c4eb3adf48c..00000000000
--- a/static/assets/images/app-wf-light-2.svg
+++ /dev/null
@@ -1,9 +0,0 @@
[9 lines of SVG markup stripped in extraction]
diff --git a/static/assets/images/apple_48.svg b/static/assets/images/apple_48.svg
deleted file mode 100644
index 49143ccbd66..00000000000
--- a/static/assets/images/apple_48.svg
+++ /dev/null
@@ -1,17 +0,0 @@
[17 lines of SVG markup stripped in extraction; recoverable text: "apple_48", "Created with Sketch."]
\ No newline at end of file
diff --git a/static/assets/images/banner_image_24512.png b/static/assets/images/banner_image_24512.png
deleted file mode 100644
index 9b580a20b56..00000000000
Binary files a/static/assets/images/banner_image_24512.png and /dev/null differ
diff --git a/static/assets/images/bg-pattern-blue.webp b/static/assets/images/bg-pattern-blue.webp
deleted file mode 100644
index ece37ea0fd8..00000000000
Binary files a/static/assets/images/bg-pattern-blue.webp and /dev/null differ
diff --git a/static/assets/images/bg-pattern-purple.webp b/static/assets/images/bg-pattern-purple.webp
deleted file mode 100644
index fb612ac239b..00000000000
Binary files a/static/assets/images/bg-pattern-purple.webp and /dev/null differ
diff --git a/static/assets/images/bg-pattern-verde.webp b/static/assets/images/bg-pattern-verde.webp
deleted file mode 100644
index 3cce3982e3a..00000000000
Binary files a/static/assets/images/bg-pattern-verde.webp and /dev/null differ
diff --git a/static/assets/images/docker-docs.png b/static/assets/images/docker-docs.png
deleted file mode 100644
index 89bebf552cc..00000000000
Binary files a/static/assets/images/docker-docs.png and /dev/null differ
diff --git a/static/assets/images/docker-docs.svg b/static/assets/images/docker-docs.svg
new file mode 100644
index 00000000000..cf36d6c5e07
--- /dev/null
+++ b/static/assets/images/docker-docs.svg
@@ -0,0 +1,14 @@
[14 lines of SVG markup stripped in extraction]
diff --git a/static/assets/images/docs-site-feedback.png b/static/assets/images/docs-site-feedback.png
deleted file mode 100644
index 6fe4122f32d..00000000000
Binary files a/static/assets/images/docs-site-feedback.png and /dev/null differ
diff --git a/static/assets/images/dot-pattern.svg b/static/assets/images/dot-pattern.svg
new file mode 100644
index 00000000000..9d112c355e9
--- /dev/null
+++ b/static/assets/images/dot-pattern.svg
@@ -0,0 +1,2669 @@
[2,669 lines of SVG markup stripped in extraction]
diff --git a/static/assets/images/favicon-192x192.png b/static/assets/images/favicon-192x192.png
new file mode 100644
index 00000000000..44ea8608135
Binary files /dev/null and b/static/assets/images/favicon-192x192.png differ
diff --git a/static/assets/images/favicon-32x32.png b/static/assets/images/favicon-32x32.png
new file mode 100644
index 00000000000..f37200f719a
Binary files /dev/null and b/static/assets/images/favicon-32x32.png differ
diff --git a/static/assets/images/feedback-widget.png b/static/assets/images/feedback-widget.png
deleted file mode 100644
index cb5bafd042b..00000000000
Binary files a/static/assets/images/feedback-widget.png and /dev/null differ
diff --git a/static/assets/images/footer_moby_icon.png b/static/assets/images/footer_moby_icon.png
deleted file mode 100644
index 05898956810..00000000000
Binary files a/static/assets/images/footer_moby_icon.png and /dev/null differ
diff --git a/static/assets/images/linux_48.svg b/static/assets/images/linux_48.svg
deleted file mode 100644
index b722087326a..00000000000
--- a/static/assets/images/linux_48.svg
+++ /dev/null
@@ -1,17 +0,0 @@
[17 lines of SVG markup stripped in extraction; recoverable text: "linux_48", "Created with Sketch."]
\ No newline at end of file
diff --git a/static/assets/images/windows_48.svg b/static/assets/images/windows_48.svg
deleted file mode 100644
index 2d5cb4a9cdd..00000000000
--- a/static/assets/images/windows_48.svg
+++ /dev/null
@@ -1,17 +0,0 @@
[17 lines of SVG markup stripped in extraction; recoverable text: "windows_48", "Created with Sketch."]
\ No newline at end of file
diff --git a/static/favicon.ico b/static/favicon.ico
new file mode 100644
index 00000000000..8e1af6dc996
Binary files /dev/null and b/static/favicon.ico differ
diff --git a/tailwind.config.js b/tailwind.config.js
deleted file mode 100644
index b71287b50f0..00000000000
--- a/tailwind.config.js
+++ /dev/null
@@ -1,271 +0,0 @@
-/** @type {import('tailwindcss').Config} */
-module.exports = {
-  content: ["./hugo_stats.json", "layouts/**/*.html", "assets/**/*.js"],
-  darkMode: "class",
-  theme: {
-    extend: {
-      typography: (theme) => ({
-        DEFAULT: {
-          css: {
-            pre: false,
-            code: false,
-            'pre code': false,
-            'code::before': false,
-            'code::after': false,
-            blockquote: false,
-            'blockquote p:first-of-type::before': false,
-            'blockquote p:last-of-type::after': false,
-            // light colors for prose
-            "--tw-prose-body": theme("colors.black"),
-            "--tw-prose-headings": theme("colors.black"),
-            "--tw-prose-lead": theme("colors.gray.light.600"),
-            "--tw-prose-links": theme("colors.blue.light.500"),
-            "--tw-prose-bold": theme("colors.black"),
-            "--tw-prose-counters": theme("colors.black"),
-            "--tw-prose-bullets": theme("colors.black"),
-            "--tw-prose-hr": theme("colors.divider.light"),
-            "--tw-prose-captions": theme("colors.gray.light.600"),
-            "--tw-prose-th-borders": theme("colors.gray.light.200"),
-            "--tw-prose-td-borders": theme("colors.gray.light.200"),
-
-            // dark colors for prose
-            "--tw-prose-invert-body": theme("colors.white"),
-            "--tw-prose-invert-headings": theme("colors.white"),
-            "--tw-prose-invert-lead": theme("colors.gray.dark.600"),
-            "--tw-prose-invert-links": theme("colors.blue.dark.500"),
-            "--tw-prose-invert-bold": theme("colors.white"),
-            "--tw-prose-invert-counters": theme("colors.white"),
-            "--tw-prose-invert-bullets": theme("colors.white"),
-            "--tw-prose-invert-hr": theme("colors.divider.dark"),
-            "--tw-prose-invert-captions":
theme("colors.gray.dark.600"), - "--tw-prose-invert-th-borders": theme("colors.gray.dark.200"), - "--tw-prose-invert-td-borders": theme("colors.gray.dark.300"), - }, - }, - }), - }, - - // theme values - fontSize: { - xs: ["0.7143rem", { letterSpacing: "0.015em", fontWeight: 500 }], - sm: "0.851rem", - base: ["14px", {}], - lg: ["1.1429rem", 1.75], - xl: ["1.2857rem", { letterSpacing: "-0.015em", fontWeight: 500 }], - "2xl": ["1.5rem", { letterSpacing: "-0.015em", fontWeight: 500 }], - "3xl": ["2rem", { fontWeight: 500 }], - "4xl": ["2.5rem", { letterSpacing: "-0.015em", fontWeight: 500 }], - }, - - colors: { - white: "#fff", - black: "#000", - transparent: 'transparent', - - background: { - light: "#f9f9fa", - dark: "#141b1f", - }, - - divider: { - light: "hsla(0, 0%, 0%, 0.1)", - dark: "hsla(0, 0%, 100%, 0.05)", - }, - - amber: { - light: { - DEFAULT: "#b85504", - 100: "#fff4dc", - 200: "#fce1a9", - 300: "#fbb552", - 400: "#dd7805", - 500: "#b85504", - 600: "#a9470f", - 700: "#893607", - 800: "#421a02", - }, - dark: { - DEFAULT: "#ed8d25", - 100: "#753715", - 200: "#944307", - 300: "#af560a", - 400: "#cd6a0a", - 500: "#ed8d25", - 600: "#f6a650", - 700: "#f8b974", - 800: "#fac892", - }, - }, - - red: { - light: { - DEFAULT: "#d52536", - 100: "#fdeaea", - 200: "#f6cfd0", - 300: "#eea3a5", - 400: "#e25d68", - 500: "#d52536", - 600: "#b72132", - 700: "#8b1924", - 800: "#350a10", - }, - dark: { - DEFAULT: "#dd4659", - 100: "#741624", - 200: "#951c2f", - 300: "#bc233c", - 400: "#d1334c", - 500: "#dd4659", - 600: "#e96c7c", - 700: "#ea8e9a", - 800: "#f0aab4", - }, - }, - - violet: { - light: { - DEFAULT: "#7d2eff", - 100: "#f7ecff", - 200: "#e9d4ff", - 300: "#c9a6ff", - 400: "#9860ff", - 500: "#7d2eff", - 600: "#6d00eb", - 700: "#5700bb", - 800: "#220041", - }, - dark: { - DEFAULT: "#a371fc", - 100: "#380093", - 200: "#4F00B4", - 300: "#6D1CDB", - 400: "#8a53ec", - 500: "#a371fc", - 600: "#b38bfc", - 700: "#c5a6fd", - 800: "#d4bdfe", - }, - }, - - magenta: { - light: { - DEFAULT: "#C918C0", - 100: "#FFE6FB", - 200: "#FFC9F6", - 300: "#FFA6F0", - 400: "#E950E2", - 500: "#C918C0", - 600: "#AB00A4", - 700: "#830080", - 800: "#440040", - }, - dark: { - DEFAULT: "#E950E2", - 100: "#7E0078", - 200: "#92008B", - 300: "#AB00A4", - 400: "#CC18C4", - 500: "#E950E2", - 600: "#FF6FF9", - 700: "#FF8AFA", - 800: "#FFA4FB", - }, - }, - - - blue: { - light: { - DEFAULT: "#086dd7", - 100: "#e5f2fc", - 200: "#c0e0fa", - 300: "#8bc7f5", - 400: "#1c90ed", - 500: "#1D63ED", - 600: "#0C49C2", - 700: "#00308D", - 800: "#00084d", - }, - dark: { - DEFAULT: "#3391ee", - 100: "#002EA3", - 200: "#063BB7", - 300: "#1351D4", - 400: "#1D63ED", - 500: "#3391ee", - 600: "#55a4f1", - 700: "#7cb9f4", - 800: "#98c8f6", - }, - }, - - green: { - light: { - DEFAULT: "#2e7f74", - 100: "#e6f5f3", - 200: "#c6eae1", - 300: "#88d5c0", - 400: "#3ba08d", - 500: "#2e7f74", - 600: "#1e6c5f", - 700: "#185a51", - 800: "#0c2c28", - }, - dark: { - DEFAULT: "#2aa391", - 100: "#003F36", - 200: "#005045", - 300: "#006256", - 400: "#008471", - 500: "#00A58C", - 600: "#3cc1ad", - 700: "#7accc3", - 800: "#a5ddd6", - }, - }, - - gray: { - light: { - DEFAULT: "#677285", - 100: "#F4F4F6", - 200: "#e1e2e6", - 300: "#c4c8d1", - 400: "#8993a5", - 500: "#677285", - 600: "#505968", - 700: "#393f49", - 800: "#17191e", - }, - dark: { - DEFAULT: "#7794ab", - 100: "#080B0E", - 200: "#1C262D", - 300: "#2D404E", - 400: "#4E6A81", - 500: "#7794ab", - 600: "#94abbc", - 700: "#adbecb", - 800: "#c4d0da", - }, - }, - }, - - fontFamily: { - sans: [ - "Roboto Flex", - 
"system-ui", - "-apple-system", - "BlinkMacSystemFont", - "Segoe UI", - "Oxygen", - "Ubuntu", - "Cantarell", - "Open Sans", - "Helvetica Neue", - "sans-serif", - ], - mono: ["Roboto Mono", "monospace"], - icons: ["Material Symbols Rounded"], - }, - }, - plugins: [require("@tailwindcss/typography")], -}; diff --git a/tech_writer.yml b/tech_writer.yml new file mode 100644 index 00000000000..b21bcd7b890 --- /dev/null +++ b/tech_writer.yml @@ -0,0 +1,322 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/docker/docker-agent/refs/heads/main/agent-schema.json +agents: + root: + model: coordinator + description: Documentation coordinator for analysis and delegation + instruction: | + + Coordinate documentation work by analyzing requests, discovering existing + content, scoping the work, and delegating to specialists. You analyze and + coordinate. You do not write. + + + + This is the Docker documentation repository (https://docs.docker.com/). + + Repository structure: + - Built with Hugo static site generator + - Content in content/ directory + - Front matter required for all pages + - Style guide enforced by Vale + - Markdown linting enforced by markdownlint + + URL structure: + The /manuals prefix is removed from published URLs. So + content/manuals/docker-desktop/install.md becomes /docker-desktop/install/ + on the live site. + + Vendored content: + Some documentation is vendored from upstream repositories via Hugo + modules: + - CLI reference docs (from docker/cli, docker/buildx, docker/compose, + docker/model-runner) + - Dockerfile reference (from moby/buildkit) + - Engine API docs (from moby/moby) + + Do not edit vendored content. These files are in _vendor/ or are + generated from data/ directories. If vendored content needs updates, + raise this with the user. + + + + 1. Analyze the request + What needs to be documented? What's the scope? + + 2. Discover existing content + Search for related docs. Find what exists, where it lives, and what's + related. + + 3. Read for context + Use filesystem tools to read specific files and understand the current + state. + + 4. Delegate to writer + Provide clear instructions: + - What needs to be written/updated + - Which files are involved + - Related docs to consider + - Any specific requirements + + 5. Delegate to editor + After the writer completes their work, delegate to the editor to + polish, validate, and fix any issues. + + 6. Handle completion + When the editor is done, analyze results: + - Validation passed: Work is complete + - Local issues remain: Delegate back to editor to fix + - Upstream coordination issues: Document for follow-up, don't block + completion + + 7. Complete + When validation passes OR only upstream issues remain, the work is + done. + + + + + Sometimes validation failures indicate upstream work is needed, not + local fixes. There are two types: + + 1. Broken links TO vendored content + Local docs reference upstream content that doesn't exist yet: + - New docs reference CLI flags not in vendored CLI reference yet + - Links to upstream docs that haven't been written yet + - References to features that exist but aren't documented upstream + + 2. Broken links FROM vendored/generated content + The broken link originates in vendored or generated documentation: + - CLI reference pages (generated from data/engine-cli/, data/buildx/) + - Content in _vendor/ directory + - Pages generated from YAML in data/ directory + + These files are read-only in this repo. 
+         The broken link must be fixed in the upstream repository
+         (docker/cli, docker/buildx, moby/moby), not here.
+
+      When you identify upstream issues:
+      1. Verify it's truly an upstream issue (check file path and
+         source)
+      2. Note briefly what upstream work is needed (which repo, what
+         needs fixing)
+      3. Do not block completion - if local changes are correct,
+         upstream work is separate
+
+      How to identify upstream vs local issues:
+
+      Check the SOURCE file path of the broken link:
+      - Link FROM content/reference/cli/ or content/reference/engine/ →
+        upstream (generated from data/)
+      - Link FROM _vendor/ → upstream (vendored content)
+      - Link FROM content/manuals/ → likely local (check if it's
+        generated)
+
+      Check the TARGET of broken links:
+      - Link TO /reference/cli/ or /reference/engine/ → likely upstream
+        (vendored)
+      - Link TO _vendor/ → definitely upstream (read-only)
+
+      Mapping content to upstream repos:
+      - CLI reference (docker commands) → docker/cli
+      - Buildx reference → docker/buildx
+      - Compose reference → docker/compose
+      - Model runner reference → docker/model-runner
+      - Dockerfile reference → moby/buildkit
+      - Engine API reference → moby/moby
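+
+      For orientation, this triage is mechanical enough to sketch in
+      shell. Illustrative only: it assumes a link report with
+      "source -> target" lines, which is not a format the validate tool
+      currently emits, and link-report.txt is a hypothetical file:
+
+        # Classify each reported broken link as upstream or local.
+        while IFS= read -r line; do
+          src="${line%% -> *}"
+          target="${line##* -> }"
+          case "$src" in
+            *_vendor/*|content/reference/cli/*|content/reference/engine/*)
+              echo "UPSTREAM (vendored/generated source): $line"
+              continue ;;
+          esac
+          case "$target" in
+            /reference/cli/*|/reference/engine/*)
+              echo "UPSTREAM (vendored target): $line" ;;
+            *) echo "LOCAL: $line" ;;
+          esac
+        done < link-report.txt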
+ and "Why does this matter?" in the first paragraph + - Write, don't narrate: Execute without explaining what you're about to do + + + + Work silently. When returning to coordinator, report briefly: + - Files changed + - Key additions/changes made + - Any concerns + + No "Let me", "Now I'll", "I'm going to" phrases. + + + + - Content written to files (not drafts) + - Follows STYLE.md voice and structure principles + - Uses appropriate components from COMPONENTS.md + - Matches existing document scope when updating + - Includes practical examples where helpful + + + toolsets: + - type: filesystem + - type: shell + + editor: + model: editor_haiku + description: Editor that polishes, validates, and fixes documentation + add_prompt_files: + - STYLE.md + - COMPONENTS.md + instruction: | + + Polish documentation to meet strict formatting and style standards, then + validate it passes all automated checks. + + STYLE.md and COMPONENTS.md contain all the rules. Apply them. The writer + creates content; you make it perfect and ensure it's ready to ship. + + + + You work on the Docker documentation repository (https://docs.docker.com/). + + Your role: + - Fix formatting (line wrapping, prettier) + - Remove AI-isms and style violations (per STYLE.md) + - Ensure correct Hugo syntax (per COMPONENTS.md) + - Validate and fix until checks pass + - Don't change meaning or add content + + + + 1. Run show_diff to see what changed (efficient - avoids reading full + files) + 2. Review diff for issues against STYLE.md and COMPONENTS.md + 3. If issues found: Read file and fix them + 4. Run prettier: npx prettier --write + 5. Run validate tool + 6. Read .validation.log (first 2000 lines, use offset/limit if needed) + 7. If validation passes: Report success + 8. If validation fails: Fix issues and repeat from step 4 + + Key: Always start with show_diff to efficiently review changes before + reading entire files. + + + + - Apply STYLE.md: All voice, grammar, formatting, and terminology rules + - Apply COMPONENTS.md: Correct syntax for shortcodes, front matter, + callouts + - Line wrapping: 80 characters + - Run prettier after editing + - Fix all local issues; identify upstream issues but don't block on them + + + + Work silently. When returning to coordinator, report in 2-3 sentences: + - Validation status (passed/failed) + - Files modified + - Remaining issues (if any) + + No narration, commentary, or detailed explanations. 
+
+      - Apply STYLE.md: All voice, grammar, formatting, and terminology
+        rules
+      - Apply COMPONENTS.md: Correct syntax for shortcodes, front
+        matter, callouts
+      - Line wrapping: 80 characters
+      - Run prettier after editing
+      - Fix all local issues; identify upstream issues but don't block
+        on them
+
+      Work silently. When returning to coordinator, report in 2-3
+      sentences:
+      - Validation status (passed/failed)
+      - Files modified
+      - Remaining issues (if any)
+
+      No narration, commentary, or detailed explanations.
+
+      - Validation passes (validate tool), OR
+      - Only upstream issues remain (cannot be fixed locally)
+      - Properly formatted (80 char wrap, prettier run)
+      - Compliant with STYLE.md and COMPONENTS.md
+
+    toolsets:
+      - type: filesystem
+      - type: shell
+      - type: script
+        shell:
+          show_diff:
+            cmd: "git diff --unified=5 --color=never"
+            description: |
+              Show what changed in modified files (git diff)
+              Use this FIRST to see what the writer changed before
+              reading full files
+              More efficient than reading entire files
+          validate:
+            cmd: "docker buildx bake validate > .validation.log 2>&1"
+            description: |
+              Run documentation validation checks (markdownlint, HTML
+              validation, link checking, structural checks)
+              Output written to .validation.log - read this file to see
+              results
+              Note: Vale (prose linting) runs separately in CI and is
+              not included
+
+models:
+  coordinator:
+    provider: anthropic
+    model: claude-sonnet-4-5
+    temperature: 0.3
+  writer_sonnet:
+    provider: anthropic
+    model: claude-sonnet-4-5
+    temperature: 0.6
+  editor_haiku:
+    provider: anthropic
+    model: claude-haiku-4-5
+    temperature: 0.2
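A quick smoke test for the MCP server card added under static/.well-known/
in this change - assuming Hugo serves static/ from the site root, that the
card ships at /.well-known/mcp/server-card.json, and that jq is installed:

```bash
# Fetch the deployed server card and print the fields agents look for first.
curl -fsSL https://docs.docker.com/.well-known/mcp/server-card.json \
  | jq -r '.serverInfo.name, .serverInfo.version, .transport.url'
```

If the card resolves and the transport URL matches
https://mcp-docs.docker.com/mcp, the discovery surface is wired up.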