mirror of
https://github.com/anchore/syft.git
synced 2025-11-17 08:23:15 +01:00
Compare commits
162 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
365325376a | ||
|
|
153f2321ce | ||
|
|
7bf7bcc461 | ||
|
|
6a21b5e5e2 | ||
|
|
6480c8a425 | ||
|
|
89842bd2f6 | ||
|
|
4a60c41f38 | ||
|
|
2e100f33f3 | ||
|
|
b444f0c2ed | ||
|
|
102d362daf | ||
|
|
66c78d44af | ||
|
|
78a4ab8ced | ||
|
|
25ca33d20e | ||
|
|
60ca241593 | ||
|
|
0f475c8bcd | ||
|
|
199394934d | ||
|
|
8a22d394ed | ||
|
|
bbef262b8f | ||
|
|
4e06a7ab32 | ||
|
|
e5711e9b42 | ||
|
|
f69b1db099 | ||
|
|
fe1ea443c2 | ||
|
|
bfcbf266df | ||
|
|
a400c675fc | ||
|
|
7c154e7c37 | ||
|
|
4c93394bc2 | ||
|
|
3e4e82f03e | ||
|
|
793b0a346f | ||
|
|
a0dac519db | ||
|
|
34f5e521c1 | ||
|
|
774b1e97b9 | ||
|
|
538430d65d | ||
|
|
5db3a9bf55 | ||
|
|
efc2f0012c | ||
|
|
c5c1454848 | ||
|
|
f5c765192c | ||
|
|
728feea620 | ||
|
|
45fb52dca1 | ||
|
|
45bf8b14ab | ||
|
|
9478cd974b | ||
|
|
0d9ea69a66 | ||
|
|
bee78c0b16 | ||
|
|
88bbcbe9c6 | ||
|
|
e0680eb704 | ||
|
|
16f851c5d9 | ||
|
|
d5ca1ad543 | ||
|
|
8be463911c | ||
|
|
44b7b0947c | ||
|
|
675075e882 | ||
|
|
31b2c4c090 | ||
|
|
07029ead8a | ||
|
|
f4de1e863c | ||
|
|
538b4a2194 | ||
|
|
fc74b07369 | ||
|
|
6627c5214c | ||
|
|
c0f32e1dba | ||
|
|
e923db2a94 | ||
|
|
0c98a364d5 | ||
|
|
4343d04652 | ||
|
|
065ac13ab7 | ||
|
|
e9a8bc5ab9 | ||
|
|
6d790ec6ec | ||
|
|
1d5bcc553a | ||
|
|
d22914baf5 | ||
|
|
760bd9a50a | ||
|
|
2d1ada1d00 | ||
|
|
8ffe15c710 | ||
|
|
89948dfa51 | ||
|
|
1a58f27f87 | ||
|
|
450cd72da5 | ||
|
|
5056c7f861 | ||
|
|
4ae8f73583 | ||
|
|
18e789c4fd | ||
|
|
7d4680bc08 | ||
|
|
231f04ae0e | ||
|
|
3b82a3724a | ||
|
|
337a2754e5 | ||
|
|
190f3068d8 | ||
|
|
bd013fe99a | ||
|
|
c732052cf1 | ||
|
|
8f1d45830d | ||
|
|
ea7dc8f468 | ||
|
|
ff6a8b1802 | ||
|
|
a77d24e379 | ||
|
|
b96d3d20af | ||
|
|
5461a92337 | ||
|
|
b9604cbf30 | ||
|
|
9217f2099f | ||
|
|
605a275dd3 | ||
|
|
e1483e0285 | ||
|
|
0a36dabf23 | ||
|
|
64b71ec04c | ||
|
|
8629080e80 | ||
|
|
f0998de717 | ||
|
|
261ab7c1fd | ||
|
|
8232f5bd1b | ||
|
|
21d50d7c31 | ||
|
|
c28b90717b | ||
|
|
323fd3e34c | ||
|
|
af4d19f81d | ||
|
|
d820c3436b | ||
|
|
409642c8f0 | ||
|
|
3abbd940e3 | ||
|
|
22f6f8f880 | ||
|
|
6005fb3c20 | ||
|
|
b87b919149 | ||
|
|
a51994d102 | ||
|
|
333b951be3 | ||
|
|
90c733d24d | ||
|
|
dacc2f61f9 | ||
|
|
06b01aaa40 | ||
|
|
e1762a2dda | ||
|
|
c5cbc89cb1 | ||
|
|
7bc15e3d82 | ||
|
|
c6cd66357a | ||
|
|
04e989d761 | ||
|
|
b6f7532b0f | ||
|
|
2531bfd8cb | ||
|
|
1fcdb67698 | ||
|
|
f986327257 | ||
|
|
67e0f7e3f9 | ||
|
|
169220ba81 | ||
|
|
1df4779b48 | ||
|
|
3a7f1f27a6 | ||
|
|
8e78fd57b8 | ||
|
|
b503690889 | ||
|
|
cc07df0347 | ||
|
|
2b8f4bc028 | ||
|
|
98c97e24a2 | ||
|
|
6f4da8c797 | ||
|
|
647196055d | ||
|
|
39441f1999 | ||
|
|
507987c193 | ||
|
|
7e4bf7f8c2 | ||
|
|
bc18e3ab8c | ||
|
|
c4eb071324 | ||
|
|
cbcf8bd542 | ||
|
|
2d8e337d34 | ||
|
|
13ffeeb3d0 | ||
|
|
170c4c41f4 | ||
|
|
7dc7c01c5c | ||
|
|
9f07fa4a68 | ||
|
|
37b2c0391b | ||
|
|
ada74a8121 | ||
|
|
ca21ccf21d | ||
|
|
26792fc12d | ||
|
|
a433045d51 | ||
|
|
8e51e8d995 | ||
|
|
ba2eb5701f | ||
|
|
c4292ad79b | ||
|
|
0e669faecd | ||
|
|
10ea022fe7 | ||
|
|
87e1d8cb87 | ||
|
|
ab9db0024e | ||
|
|
6b48bd4b5e | ||
|
|
89470ecdd3 | ||
|
|
104df88143 | ||
|
|
80e61175ad | ||
|
|
9f956dca8f | ||
|
|
6452a19009 | ||
|
|
21496e7a81 | ||
|
|
3e5befc267 |
16
.binny.yaml
16
.binny.yaml
@ -2,7 +2,7 @@ tools:
|
||||
# we want to use a pinned version of binny to manage the toolchain (so binny manages itself!)
|
||||
- name: binny
|
||||
version:
|
||||
want: v0.9.0
|
||||
want: v0.10.0
|
||||
method: github-release
|
||||
with:
|
||||
repo: anchore/binny
|
||||
@ -26,7 +26,7 @@ tools:
|
||||
# used for linting
|
||||
- name: golangci-lint
|
||||
version:
|
||||
want: v2.3.1
|
||||
want: v2.6.2
|
||||
method: github-release
|
||||
with:
|
||||
repo: golangci/golangci-lint
|
||||
@ -42,7 +42,7 @@ tools:
|
||||
# used for signing the checksums file at release
|
||||
- name: cosign
|
||||
version:
|
||||
want: v2.5.3
|
||||
want: v3.0.2
|
||||
method: github-release
|
||||
with:
|
||||
repo: sigstore/cosign
|
||||
@ -58,7 +58,7 @@ tools:
|
||||
# used to release all artifacts
|
||||
- name: goreleaser
|
||||
version:
|
||||
want: v2.11.2
|
||||
want: v2.12.7
|
||||
method: github-release
|
||||
with:
|
||||
repo: goreleaser/goreleaser
|
||||
@ -90,7 +90,7 @@ tools:
|
||||
# used for running all local and CI tasks
|
||||
- name: task
|
||||
version:
|
||||
want: v3.44.1
|
||||
want: v3.45.5
|
||||
method: github-release
|
||||
with:
|
||||
repo: go-task/task
|
||||
@ -98,7 +98,7 @@ tools:
|
||||
# used for triggering a release
|
||||
- name: gh
|
||||
version:
|
||||
want: v2.76.2
|
||||
want: v2.83.1
|
||||
method: github-release
|
||||
with:
|
||||
repo: cli/cli
|
||||
@ -106,7 +106,7 @@ tools:
|
||||
# used to upload test fixture cache
|
||||
- name: oras
|
||||
version:
|
||||
want: v1.2.3
|
||||
want: v1.3.0
|
||||
method: github-release
|
||||
with:
|
||||
repo: oras-project/oras
|
||||
@ -114,7 +114,7 @@ tools:
|
||||
# used to upload test fixture cache
|
||||
- name: yq
|
||||
version:
|
||||
want: v4.47.1
|
||||
want: v4.48.2
|
||||
method: github-release
|
||||
with:
|
||||
repo: mikefarah/yq
|
||||
|
||||
@ -9,6 +9,9 @@ permit:
|
||||
- Unlicense
|
||||
|
||||
ignore-packages:
|
||||
# https://github.com/sorairolake/lzip-go/blob/34a2615d2abf740175c6b0a835baa08364e09430/go.sum.license#L3
|
||||
# has `SPDX-License-Identifier: Apache-2.0 OR MIT`, both of which are acceptable
|
||||
- github.com/sorairolake/lzip-go
|
||||
# packageurl-go is released under the MIT license located in the root of the repo at /mit.LICENSE
|
||||
- github.com/anchore/packageurl-go
|
||||
|
||||
|
||||
6
.github/actions/bootstrap/action.yaml
vendored
6
.github/actions/bootstrap/action.yaml
vendored
@ -29,7 +29,7 @@ runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
# note: go mod and build is automatically cached on default with v4+
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
if: inputs.go-version != ''
|
||||
with:
|
||||
go-version: ${{ inputs.go-version }}
|
||||
@ -37,7 +37,7 @@ runs:
|
||||
- name: Restore tool cache
|
||||
if: inputs.tools == 'true'
|
||||
id: tool-cache
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
|
||||
with:
|
||||
path: ${{ github.workspace }}/.tool
|
||||
key: ${{ inputs.cache-key-prefix }}-${{ runner.os }}-tool-${{ hashFiles('.binny.yaml') }}
|
||||
@ -63,7 +63,7 @@ runs:
|
||||
|
||||
- name: Restore ORAS cache from github actions
|
||||
if: inputs.download-test-fixture-cache == 'true'
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
|
||||
with:
|
||||
path: ${{ github.workspace }}/.tmp/oras-cache
|
||||
key: ${{ inputs.cache-key-prefix }}-oras-cache
|
||||
|
||||
10
.github/workflows/codeql-analysis.yml
vendored
10
.github/workflows/codeql-analysis.yml
vendored
@ -36,18 +36,18 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 #v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 #v6.0.0
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed #v3.29.5
|
||||
uses: github/codeql-action/init@014f16e7ab1402f30e7c3329d33797e7948572db #v3.29.5
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||
@ -58,7 +58,7 @@ jobs:
|
||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||
# If this step fails, then you should remove it and run the build manually (see below)
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@76621b61decf072c1cee8dd1ce2d2a82d33c17ed #v3.29.5
|
||||
uses: github/codeql-action/autobuild@014f16e7ab1402f30e7c3329d33797e7948572db #v3.29.5
|
||||
|
||||
# ℹ️ Command-line programs to run using the OS shell.
|
||||
# 📚 https://git.io/JvXDl
|
||||
@ -72,4 +72,4 @@ jobs:
|
||||
# make release
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed #v3.29.5
|
||||
uses: github/codeql-action/analyze@014f16e7ab1402f30e7c3329d33797e7948572db #v3.29.5
|
||||
|
||||
2
.github/workflows/detect-schema-changes.yaml
vendored
2
.github/workflows/detect-schema-changes.yaml
vendored
@ -34,7 +34,7 @@ jobs:
|
||||
issues: write
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
18
.github/workflows/release.yaml
vendored
18
.github/workflows/release.yaml
vendored
@ -15,10 +15,20 @@ jobs:
|
||||
environment: release
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Bootstrap environment
|
||||
uses: ./.github/actions/bootstrap
|
||||
|
||||
- name: Validate Apple notarization credentials
|
||||
run: .tool/quill submission list
|
||||
env:
|
||||
QUILL_NOTARY_ISSUER: ${{ secrets.APPLE_NOTARY_ISSUER }}
|
||||
QUILL_NOTARY_KEY_ID: ${{ secrets.APPLE_NOTARY_KEY_ID }}
|
||||
QUILL_NOTARY_KEY: ${{ secrets.APPLE_NOTARY_KEY }}
|
||||
|
||||
- name: Check if running on main
|
||||
if: github.ref != 'refs/heads/main'
|
||||
# we are using the following flag when running `cosign blob-verify` for checksum signature verification:
|
||||
@ -116,7 +126,7 @@ jobs:
|
||||
# required for goreleaser signs section with cosign
|
||||
id-token: write
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: true
|
||||
@ -161,13 +171,13 @@ jobs:
|
||||
# for updating brew formula in anchore/homebrew-syft
|
||||
GITHUB_BREW_TOKEN: ${{ secrets.ANCHOREOPS_GITHUB_OSS_WRITE_TOKEN }}
|
||||
|
||||
- uses: anchore/sbom-action@7b36ad622f042cab6f59a75c2ac24ccb256e9b45 #v0.20.4
|
||||
- uses: anchore/sbom-action@8e94d75ddd33f69f691467e42275782e4bfefe84 #v0.20.9
|
||||
continue-on-error: true
|
||||
with:
|
||||
file: go.mod
|
||||
artifact-name: sbom.spdx.json
|
||||
|
||||
- uses: 8398a7/action-slack@1750b5085f3ec60384090fb7c52965ef822e869e #v3.18.0
|
||||
- uses: 8398a7/action-slack@77eaa4f1c608a7d68b38af4e3f739dcd8cba273e #v3.19.0
|
||||
continue-on-error: true
|
||||
with:
|
||||
status: ${{ job.status }}
|
||||
|
||||
@ -19,7 +19,7 @@ jobs:
|
||||
permissions:
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
@ -15,7 +15,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository_owner == 'anchore' # only run for main repo (not forks)
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@ -31,11 +31,11 @@ jobs:
|
||||
with:
|
||||
repos: ${{ github.event.inputs.repos }}
|
||||
|
||||
- uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a #v2.1.0
|
||||
- uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 #v2.1.4
|
||||
id: generate-token
|
||||
with:
|
||||
app_id: ${{ secrets.TOKEN_APP_ID }}
|
||||
private_key: ${{ secrets.TOKEN_APP_PRIVATE_KEY }}
|
||||
app-id: ${{ secrets.TOKEN_APP_ID }}
|
||||
private-key: ${{ secrets.TOKEN_APP_PRIVATE_KEY }}
|
||||
|
||||
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8
|
||||
with:
|
||||
|
||||
8
.github/workflows/update-bootstrap-tools.yml
vendored
8
.github/workflows/update-bootstrap-tools.yml
vendored
@ -13,7 +13,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'anchore/syft' # only run for main repo
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@ -45,11 +45,11 @@ jobs:
|
||||
echo "\`\`\`"
|
||||
} >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
- uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a #v2.1.0
|
||||
- uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 #v2.1.4
|
||||
id: generate-token
|
||||
with:
|
||||
app_id: ${{ secrets.TOKEN_APP_ID }}
|
||||
private_key: ${{ secrets.TOKEN_APP_PRIVATE_KEY }}
|
||||
app-id: ${{ secrets.TOKEN_APP_ID }}
|
||||
private-key: ${{ secrets.TOKEN_APP_PRIVATE_KEY }}
|
||||
|
||||
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8
|
||||
with:
|
||||
|
||||
@ -14,26 +14,42 @@ env:
|
||||
jobs:
|
||||
upgrade-cpe-dictionary-index:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
if: github.repository == 'anchore/syft' # only run for main repo
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Bootstrap environment
|
||||
uses: ./.github/actions/bootstrap
|
||||
id: bootstrap
|
||||
|
||||
- name: Bootstrap environment
|
||||
uses: ./.github/actions/bootstrap
|
||||
- name: Login to GitHub Container Registry
|
||||
run: |
|
||||
echo "${{ secrets.GITHUB_TOKEN }}" | ${{ steps.bootstrap.outputs.oras }} login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
|
||||
- run: |
|
||||
make generate-cpe-dictionary-index
|
||||
- name: Pull CPE cache from registry
|
||||
run: make generate:cpe-index:cache:pull
|
||||
|
||||
- uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a #v2.1.0
|
||||
- name: Update CPE cache from NVD API
|
||||
run: make generate:cpe-index:cache:update
|
||||
env:
|
||||
NVD_API_KEY: ${{ secrets.NVD_API_KEY }}
|
||||
|
||||
- name: Generate CPE dictionary index
|
||||
run: make generate:cpe-index:build
|
||||
|
||||
- name: Push updated CPE cache to registry
|
||||
run: make generate:cpe-index:cache:push
|
||||
|
||||
- uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 #v2.1.4
|
||||
id: generate-token
|
||||
with:
|
||||
app_id: ${{ secrets.TOKEN_APP_ID }}
|
||||
private_key: ${{ secrets.TOKEN_APP_PRIVATE_KEY }}
|
||||
app-id: ${{ secrets.TOKEN_APP_ID }}
|
||||
private-key: ${{ secrets.TOKEN_APP_PRIVATE_KEY }}
|
||||
|
||||
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8
|
||||
with:
|
||||
@ -47,7 +63,7 @@ jobs:
|
||||
Update CPE dictionary index based on the latest available CPE dictionary
|
||||
token: ${{ steps.generate-token.outputs.token }}
|
||||
|
||||
- uses: 8398a7/action-slack@1750b5085f3ec60384090fb7c52965ef822e869e #v3.18.0
|
||||
- uses: 8398a7/action-slack@77eaa4f1c608a7d68b38af4e3f739dcd8cba273e #v3.19.0
|
||||
with:
|
||||
status: ${{ job.status }}
|
||||
fields: workflow,eventName,job
|
||||
|
||||
54
.github/workflows/update-spdx-license-list.yaml
vendored
Normal file
54
.github/workflows/update-spdx-license-list.yaml
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
name: PR to update SPDX license list
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 6 * * 1" # every monday at 6 AM UTC
|
||||
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
SLACK_NOTIFICATIONS: true
|
||||
|
||||
jobs:
|
||||
upgrade-spdx-license-list:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'anchore/syft' # only run for main repo
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Bootstrap environment
|
||||
uses: ./.github/actions/bootstrap
|
||||
|
||||
- run: |
|
||||
make generate-license-list
|
||||
|
||||
- uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a #v2.1.0
|
||||
id: generate-token
|
||||
with:
|
||||
app_id: ${{ secrets.TOKEN_APP_ID }}
|
||||
private_key: ${{ secrets.TOKEN_APP_PRIVATE_KEY }}
|
||||
|
||||
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8
|
||||
with:
|
||||
signoff: true
|
||||
delete-branch: true
|
||||
branch: auto/latest-spdx-license-list
|
||||
labels: dependencies
|
||||
commit-message: "chore(deps): update SPDX license list"
|
||||
title: "chore(deps): update SPDX license list"
|
||||
body: |
|
||||
Update SPDX license list based on the latest available list from spdx.org
|
||||
token: ${{ steps.generate-token.outputs.token }}
|
||||
|
||||
- uses: 8398a7/action-slack@77eaa4f1c608a7d68b38af4e3f739dcd8cba273e #v3.19.0
|
||||
with:
|
||||
status: ${{ job.status }}
|
||||
fields: workflow,eventName,job
|
||||
text: Syft SPDX license list update failed
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_TOOLBOX_WEBHOOK_URL }}
|
||||
if: ${{ failure() && env.SLACK_NOTIFICATIONS == 'true' }}
|
||||
@ -23,12 +23,12 @@ jobs:
|
||||
contents: read
|
||||
security-events: write # for uploading SARIF results
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: "Run zizmor"
|
||||
uses: zizmorcore/zizmor-action@f52a838cfabf134edcbaa7c8b3677dde20045018 # v0.1.1
|
||||
uses: zizmorcore/zizmor-action@e673c3917a1aef3c65c972347ed84ccd013ecda4 # v0.2.0
|
||||
with:
|
||||
config-file: .github/zizmor.yml
|
||||
sarif-upload: true
|
||||
|
||||
32
.github/workflows/validations.yaml
vendored
32
.github/workflows/validations.yaml
vendored
@ -17,7 +17,7 @@ jobs:
|
||||
name: "Static analysis"
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@ -34,7 +34,7 @@ jobs:
|
||||
# we need more storage than what's on the default runner
|
||||
runs-on: ubuntu-22.04-4core-16gb
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@ -52,7 +52,7 @@ jobs:
|
||||
name: "Integration tests"
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@ -72,7 +72,7 @@ jobs:
|
||||
name: "Build snapshot artifacts"
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@ -90,7 +90,7 @@ jobs:
|
||||
# why not use actions/upload-artifact? It is very slow (3 minutes to upload ~600MB of data, vs 10 seconds with this approach).
|
||||
# see https://github.com/actions/upload-artifact/issues/199 for more info
|
||||
- name: Upload snapshot artifacts
|
||||
uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
|
||||
uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
|
||||
with:
|
||||
# we need to preserve the snapshot data itself as well as the task data that confirms if the
|
||||
# snapshot build is stale or not. Otherwise the downstream jobs will attempt to rebuild the snapshot
|
||||
@ -107,7 +107,7 @@ jobs:
|
||||
needs: [Build-Snapshot-Artifacts]
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@ -118,7 +118,7 @@ jobs:
|
||||
|
||||
- name: Download snapshot build
|
||||
id: snapshot-cache
|
||||
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
|
||||
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
|
||||
with:
|
||||
path: |
|
||||
snapshot
|
||||
@ -133,7 +133,7 @@ jobs:
|
||||
|
||||
- run: npm install @actions/artifact@2.2.2
|
||||
|
||||
- uses: actions/github-script@v7
|
||||
- uses: actions/github-script@v8
|
||||
with:
|
||||
script: |
|
||||
const { readdirSync } = require('fs')
|
||||
@ -164,7 +164,7 @@ jobs:
|
||||
needs: [Build-Snapshot-Artifacts]
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@ -175,7 +175,7 @@ jobs:
|
||||
|
||||
- name: Download snapshot build
|
||||
id: snapshot-cache
|
||||
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
|
||||
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
|
||||
with:
|
||||
path: |
|
||||
snapshot
|
||||
@ -210,9 +210,9 @@ jobs:
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- name: Install Cosign
|
||||
uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # v3.9.2
|
||||
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@ -225,7 +225,7 @@ jobs:
|
||||
|
||||
- name: Download snapshot build
|
||||
id: snapshot-cache
|
||||
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
|
||||
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
|
||||
with:
|
||||
path: |
|
||||
snapshot
|
||||
@ -251,7 +251,7 @@ jobs:
|
||||
needs: [Build-Snapshot-Artifacts]
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@ -262,7 +262,7 @@ jobs:
|
||||
|
||||
- name: Download snapshot build
|
||||
id: snapshot-cache
|
||||
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
|
||||
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
|
||||
with:
|
||||
path: |
|
||||
snapshot
|
||||
@ -291,7 +291,7 @@ jobs:
|
||||
- Cli-Linux
|
||||
- Upload-Snapshot-Artifacts
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@ -2,6 +2,7 @@
|
||||
go.work
|
||||
go.work.sum
|
||||
.tool-versions
|
||||
.python-version
|
||||
|
||||
# app configuration
|
||||
/.syft.yaml
|
||||
@ -16,6 +17,8 @@ bin/
|
||||
/snapshot
|
||||
/.tool
|
||||
/.task
|
||||
/generate
|
||||
/specs
|
||||
|
||||
# changelog generation
|
||||
CHANGELOG.md
|
||||
@ -70,3 +73,5 @@ cosign.pub
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
|
||||
|
||||
@ -42,18 +42,19 @@ builds:
|
||||
dir: ./cmd/syft
|
||||
binary: syft
|
||||
goos: [windows]
|
||||
goarch: [amd64]
|
||||
goarch: [amd64, arm64]
|
||||
mod_timestamp: *build-timestamp
|
||||
ldflags: *build-ldflags
|
||||
|
||||
archives:
|
||||
- id: linux-archives
|
||||
builds: [linux-build]
|
||||
ids: [linux-build]
|
||||
- id: darwin-archives
|
||||
builds: [darwin-build]
|
||||
ids: [darwin-build]
|
||||
- id: windows-archives
|
||||
format: zip
|
||||
builds: [windows-build]
|
||||
formats:
|
||||
- zip
|
||||
ids: [windows-build]
|
||||
|
||||
nfpms:
|
||||
- license: "Apache 2.0"
|
||||
@ -336,6 +337,7 @@ signs:
|
||||
certificate: "${artifact}.pem"
|
||||
args:
|
||||
- "sign-blob"
|
||||
- "--use-signing-config=false"
|
||||
- "--oidc-issuer=https://token.actions.githubusercontent.com"
|
||||
- "--output-certificate=${certificate}"
|
||||
- "--output-signature=${signature}"
|
||||
|
||||
@ -27,7 +27,7 @@ Also double check that the docker context being used is the default context. If
|
||||
|
||||
`docker context use default`
|
||||
|
||||
After cloning the following step can help you get setup:
|
||||
After cloning, the following steps can help you get setup:
|
||||
1. run `make bootstrap` to download go mod dependencies, create the `/.tmp` dir, and download helper utilities.
|
||||
2. run `make` to view the selection of developer commands in the Makefile
|
||||
3. run `make build` to build the release snapshot binaries and packages
|
||||
@ -120,7 +120,7 @@ sequenceDiagram
|
||||
source-->>+sbom: add source to SBOM struct
|
||||
source-->>+catalog: pass src to generate catalog
|
||||
catalog-->-sbom: add cataloging results onto SBOM
|
||||
sbom-->>encoder: pass SBOM and format desiered to syft encoder
|
||||
sbom-->>encoder: pass SBOM and format desired to syft encoder
|
||||
encoder-->>source: return bytes that are the SBOM of the original input
|
||||
|
||||
Note right of catalog: cataloger configuration is done based on src
|
||||
@ -186,7 +186,7 @@ Cataloger names should be unique and named with the following rules of thumb in
|
||||
- Use lowercase letters, numbers, and hyphens only
|
||||
- Use hyphens to separate words
|
||||
- Catalogers for language ecosystems should start with the language name (e.g. `python-` for a cataloger that raises up python packages)
|
||||
- Distinct between when the cataloger is searching for evidence of installed packages vs declared packages. For example, there are currently two different gemspec-based catalogers, the `ruby-gemspec-cataloger` and `ruby-installed-gemspec-cataloger`, where the latter requires that the gemspec is found within a `specifications` directory (which means it was installed, not just at the root of a source repo).
|
||||
- Distinguish between when the cataloger is searching for evidence of installed packages vs declared packages. For example, there are currently two different gemspec-based catalogers, the `ruby-gemspec-cataloger` and `ruby-installed-gemspec-cataloger`, where the latter requires that the gemspec is found within a `specifications` directory (which means it was installed, not just at the root of a source repo).
|
||||
|
||||
#### Building a new Cataloger
|
||||
|
||||
|
||||
@ -106,8 +106,8 @@ syft <image> -o <format>
|
||||
Where the `formats` available are:
|
||||
- `syft-json`: Use this to get as much information out of Syft as possible!
|
||||
- `syft-text`: A row-oriented, human-and-machine-friendly output.
|
||||
- `cyclonedx-xml`: A XML report conforming to the [CycloneDX 1.6 specification](https://cyclonedx.org/specification/overview/).
|
||||
- `cyclonedx-xml@1.5`: A XML report conforming to the [CycloneDX 1.5 specification](https://cyclonedx.org/specification/overview/).
|
||||
- `cyclonedx-xml`: An XML report conforming to the [CycloneDX 1.6 specification](https://cyclonedx.org/specification/overview/).
|
||||
- `cyclonedx-xml@1.5`: An XML report conforming to the [CycloneDX 1.5 specification](https://cyclonedx.org/specification/overview/).
|
||||
- `cyclonedx-json`: A JSON report conforming to the [CycloneDX 1.6 specification](https://cyclonedx.org/specification/overview/).
|
||||
- `cyclonedx-json@1.5`: A JSON report conforming to the [CycloneDX 1.5 specification](https://cyclonedx.org/specification/overview/).
|
||||
- `spdx-tag-value`: A tag-value formatted report conforming to the [SPDX 2.3 specification](https://spdx.github.io/spdx-spec/v2.3/).
|
||||
@ -116,7 +116,7 @@ Where the `formats` available are:
|
||||
- `spdx-json@2.2`: A JSON report conforming to the [SPDX 2.2 JSON Schema](https://github.com/spdx/spdx-spec/blob/v2.2/schemas/spdx-schema.json).
|
||||
- `github-json`: A JSON report conforming to GitHub's dependency snapshot format.
|
||||
- `syft-table`: A columnar summary (default).
|
||||
- `template`: Lets the user specify the output format. See ["Using templates"](#using-templates) below.
|
||||
- `template`: Lets the user specify the output format. See ["Using templates"](https://github.com/anchore/syft/wiki/using-templates) below.
|
||||
|
||||
Note that flags using the @<version> can be used for earlier versions of each specification as well.
|
||||
|
||||
@ -133,8 +133,9 @@ Note that flags using the @<version> can be used for earlier versions of each sp
|
||||
- Elixir (mix)
|
||||
- Erlang (rebar3)
|
||||
- Go (go.mod, Go binaries)
|
||||
- GitHub (workflows, actions)
|
||||
- Haskell (cabal, stack)
|
||||
- Java (jar, ear, war, par, sar, nar, native-image)
|
||||
- Java (jar, ear, war, par, sar, nar, rar, native-image)
|
||||
- JavaScript (npm, yarn)
|
||||
- Jenkins Plugins (jpi, hpi)
|
||||
- Linux kernel archives (vmlinz)
|
||||
|
||||
@ -29,4 +29,4 @@ To report a security issue, please email
|
||||
with a description of the issue, the steps you took to create the issue,
|
||||
affected versions, and, if known, mitigations for the issue.
|
||||
|
||||
All support will be made on the best effort base, so please indicate the "urgency level" of the vulnerability as Critical, High, Medium or Low.
|
||||
All support will be made on a best effort basis, so please indicate the "urgency level" of the vulnerability as Critical, High, Medium or Low.
|
||||
|
||||
@ -1,5 +1,9 @@
|
||||
|
||||
version: "3"
|
||||
|
||||
includes:
|
||||
generate:cpe-index: ./task.d/generate/cpe-index.yaml
|
||||
|
||||
vars:
|
||||
OWNER: anchore
|
||||
PROJECT: syft
|
||||
@ -502,7 +506,7 @@ tasks:
|
||||
generate-json-schema:
|
||||
desc: Generate a new JSON schema
|
||||
cmds:
|
||||
- "cd syft/internal && go generate . && cd jsonschema && go run . && go fmt ../..."
|
||||
- "cd ./internal && go generate . && cd ./jsonschema && go run . && go fmt ../..."
|
||||
|
||||
generate-license-list:
|
||||
desc: Generate an updated license processing code off of the latest available SPDX license list
|
||||
@ -511,10 +515,11 @@ tasks:
|
||||
- "gofmt -s -w ./internal/spdxlicense"
|
||||
|
||||
generate-cpe-dictionary-index:
|
||||
desc: Generate the CPE index based off of the latest available CPE dictionary
|
||||
dir: "syft/pkg/cataloger/internal/cpegenerate/dictionary"
|
||||
desc: Generate the CPE index from local cache
|
||||
cmds:
|
||||
- "go generate"
|
||||
- task: generate:cpe-index:cache:pull
|
||||
- task: generate:cpe-index:cache:update
|
||||
- task: generate:cpe-index:build
|
||||
|
||||
|
||||
## Build-related targets #################################
|
||||
|
||||
@ -253,7 +253,6 @@ func generateSBOMForAttestation(ctx context.Context, id clio.Identification, opt
|
||||
}
|
||||
|
||||
src, err := getSource(ctx, opts, userInput, stereoscope.RegistryTag)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -87,8 +87,8 @@ func runCatalogerList(opts *catalogerListOptions) error {
|
||||
}
|
||||
|
||||
func catalogerListReport(opts *catalogerListOptions, allTaskGroups [][]task.Task) (string, error) {
|
||||
defaultCatalogers := options.Flatten(opts.DefaultCatalogers)
|
||||
selectCatalogers := options.Flatten(opts.SelectCatalogers)
|
||||
defaultCatalogers := options.FlattenAndSort(opts.DefaultCatalogers)
|
||||
selectCatalogers := options.FlattenAndSort(opts.SelectCatalogers)
|
||||
selectedTaskGroups, selectionEvidence, err := task.SelectInGroups(
|
||||
allTaskGroups,
|
||||
cataloging.NewSelectionRequest().
|
||||
|
||||
@ -10,7 +10,7 @@ import (
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v3"
|
||||
"go.yaml.in/yaml/v3"
|
||||
|
||||
"github.com/anchore/clio"
|
||||
"github.com/anchore/fangs"
|
||||
@ -185,7 +185,6 @@ func runScan(ctx context.Context, id clio.Identification, opts *scanOptions, use
|
||||
}
|
||||
|
||||
src, err := getSource(ctx, &opts.Catalog, userInput, sources...)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -220,6 +219,7 @@ func getSource(ctx context.Context, opts *options.Catalog, userInput string, sou
|
||||
WithAlias(source.Alias{
|
||||
Name: opts.Source.Name,
|
||||
Version: opts.Source.Version,
|
||||
Supplier: opts.Source.Supplier,
|
||||
}).
|
||||
WithExcludeConfig(source.ExcludeConfig{
|
||||
Paths: opts.Exclusions,
|
||||
|
||||
@ -198,9 +198,10 @@ func (cfg Catalog) ToPackagesConfig() pkgcataloging.Config {
|
||||
},
|
||||
Nix: nix.DefaultConfig().
|
||||
WithCaptureOwnedFiles(cfg.Nix.CaptureOwnedFiles),
|
||||
Python: python.CatalogerConfig{
|
||||
GuessUnpinnedRequirements: cfg.Python.GuessUnpinnedRequirements,
|
||||
},
|
||||
Python: python.DefaultCatalogerConfig().
|
||||
WithSearchRemoteLicenses(*multiLevelOption(false, enrichmentEnabled(cfg.Enrich, task.Python), cfg.Python.SearchRemoteLicenses)).
|
||||
WithPypiBaseURL(cfg.Python.PypiBaseURL).
|
||||
WithGuessUnpinnedRequirements(*multiLevelOption(false, enrichmentEnabled(cfg.Enrich, task.Python), cfg.Python.GuessUnpinnedRequirements)),
|
||||
JavaArchive: java.DefaultArchiveCatalogerConfig().
|
||||
WithUseMavenLocalRepository(*multiLevelOption(false, enrichmentEnabled(cfg.Enrich, task.Java, task.Maven), cfg.Java.UseMavenLocalRepository)).
|
||||
WithMavenLocalRepositoryDir(cfg.Java.MavenLocalRepositoryDir).
|
||||
@ -259,6 +260,9 @@ func (cfg *Catalog) AddFlags(flags clio.FlagSet) {
|
||||
|
||||
flags.StringVarP(&cfg.Source.BasePath, "base-path", "",
|
||||
"base directory for scanning, no links will be followed above this directory, and all paths will be reported relative to this directory")
|
||||
|
||||
flags.StringVarP(&cfg.Source.Supplier, "source-supplier", "",
|
||||
"the organization that supplied the component, which often may be the manufacturer, distributor, or repackager")
|
||||
}
|
||||
|
||||
func (cfg *Catalog) DescribeFields(descriptions fangs.FieldDescriptionSet) {
|
||||
@ -280,10 +284,10 @@ func (cfg *Catalog) PostLoad() error {
|
||||
|
||||
cfg.From = Flatten(cfg.From)
|
||||
|
||||
cfg.Catalogers = Flatten(cfg.Catalogers)
|
||||
cfg.DefaultCatalogers = Flatten(cfg.DefaultCatalogers)
|
||||
cfg.SelectCatalogers = Flatten(cfg.SelectCatalogers)
|
||||
cfg.Enrich = Flatten(cfg.Enrich)
|
||||
cfg.Catalogers = FlattenAndSort(cfg.Catalogers)
|
||||
cfg.DefaultCatalogers = FlattenAndSort(cfg.DefaultCatalogers)
|
||||
cfg.SelectCatalogers = FlattenAndSort(cfg.SelectCatalogers)
|
||||
cfg.Enrich = FlattenAndSort(cfg.Enrich)
|
||||
|
||||
// for backwards compatibility
|
||||
cfg.DefaultCatalogers = append(cfg.DefaultCatalogers, cfg.Catalogers...)
|
||||
@ -308,6 +312,11 @@ func Flatten(commaSeparatedEntries []string) []string {
|
||||
out = append(out, strings.TrimSpace(s))
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func FlattenAndSort(commaSeparatedEntries []string) []string {
|
||||
out := Flatten(commaSeparatedEntries)
|
||||
sort.Strings(out)
|
||||
return out
|
||||
}
|
||||
@ -317,6 +326,7 @@ var publicisedEnrichmentOptions = []string{
|
||||
task.Golang,
|
||||
task.Java,
|
||||
task.JavaScript,
|
||||
task.Python,
|
||||
}
|
||||
|
||||
func enrichmentEnabled(enrichDirectives []string, features ...string) *bool {
|
||||
|
||||
@ -79,6 +79,98 @@ func TestCatalog_PostLoad(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlatten(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input []string
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
name: "preserves order of comma-separated values",
|
||||
input: []string{"registry,docker,oci-dir"},
|
||||
expected: []string{"registry", "docker", "oci-dir"},
|
||||
},
|
||||
{
|
||||
name: "preserves order across multiple entries",
|
||||
input: []string{"registry,docker", "oci-dir"},
|
||||
expected: []string{"registry", "docker", "oci-dir"},
|
||||
},
|
||||
{
|
||||
name: "trims whitespace",
|
||||
input: []string{" registry , docker ", " oci-dir "},
|
||||
expected: []string{"registry", "docker", "oci-dir"},
|
||||
},
|
||||
{
|
||||
name: "handles single value",
|
||||
input: []string{"registry"},
|
||||
expected: []string{"registry"},
|
||||
},
|
||||
{
|
||||
name: "handles empty input",
|
||||
input: []string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
name: "preserves reverse alphabetical order",
|
||||
input: []string{"zebra,yankee,xray"},
|
||||
expected: []string{"zebra", "yankee", "xray"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := Flatten(tt.input)
|
||||
assert.Equal(t, tt.expected, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlattenAndSort(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input []string
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
name: "sorts comma-separated values",
|
||||
input: []string{"registry,docker,oci-dir"},
|
||||
expected: []string{"docker", "oci-dir", "registry"},
|
||||
},
|
||||
{
|
||||
name: "sorts across multiple entries",
|
||||
input: []string{"registry,docker", "oci-dir"},
|
||||
expected: []string{"docker", "oci-dir", "registry"},
|
||||
},
|
||||
{
|
||||
name: "trims whitespace and sorts",
|
||||
input: []string{" registry , docker ", " oci-dir "},
|
||||
expected: []string{"docker", "oci-dir", "registry"},
|
||||
},
|
||||
{
|
||||
name: "handles single value",
|
||||
input: []string{"registry"},
|
||||
expected: []string{"registry"},
|
||||
},
|
||||
{
|
||||
name: "handles empty input",
|
||||
input: []string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
name: "sorts reverse alphabetical order",
|
||||
input: []string{"zebra,yankee,xray"},
|
||||
expected: []string{"xray", "yankee", "zebra"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := FlattenAndSort(tt.input)
|
||||
assert.Equal(t, tt.expected, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_enrichmentEnabled(t *testing.T) {
|
||||
tests := []struct {
|
||||
directives string
|
||||
@ -139,7 +231,7 @@ func Test_enrichmentEnabled(t *testing.T) {
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.directives, func(t *testing.T) {
|
||||
got := enrichmentEnabled(Flatten([]string{test.directives}), test.test)
|
||||
got := enrichmentEnabled(FlattenAndSort([]string{test.directives}), test.test)
|
||||
assert.Equal(t, test.expected, got)
|
||||
})
|
||||
}
|
||||
|
||||
@ -3,7 +3,9 @@ package options
|
||||
import "github.com/anchore/clio"
|
||||
|
||||
type pythonConfig struct {
|
||||
GuessUnpinnedRequirements bool `json:"guess-unpinned-requirements" yaml:"guess-unpinned-requirements" mapstructure:"guess-unpinned-requirements"`
|
||||
SearchRemoteLicenses *bool `json:"search-remote-licenses" yaml:"search-remote-licenses" mapstructure:"search-remote-licenses"`
|
||||
PypiBaseURL string `json:"pypi-base-url" yaml:"pypi-base-url" mapstructure:"pypi-base-url"`
|
||||
GuessUnpinnedRequirements *bool `json:"guess-unpinned-requirements" yaml:"guess-unpinned-requirements" mapstructure:"guess-unpinned-requirements"`
|
||||
}
|
||||
|
||||
var _ interface {
|
||||
@ -11,6 +13,8 @@ var _ interface {
|
||||
} = (*pythonConfig)(nil)
|
||||
|
||||
func (o *pythonConfig) DescribeFields(descriptions clio.FieldDescriptionSet) {
|
||||
descriptions.Add(&o.SearchRemoteLicenses, `enables Syft to use the network to fill in more detailed license information`)
|
||||
descriptions.Add(&o.PypiBaseURL, `base Pypi url to use`)
|
||||
descriptions.Add(&o.GuessUnpinnedRequirements, `when running across entries in requirements.txt that do not specify a specific version
|
||||
(e.g. "sqlalchemy >= 1.0.0, <= 2.0.0, != 3.0.0, <= 3.0.0"), attempt to guess what the version could
|
||||
be based on the version requirements specified (e.g. "1.0.0"). When enabled the lowest expressible version
|
||||
|
||||
@ -16,6 +16,8 @@ import (
|
||||
type sourceConfig struct {
|
||||
Name string `json:"name" yaml:"name" mapstructure:"name"`
|
||||
Version string `json:"version" yaml:"version" mapstructure:"version"`
|
||||
Supplier string `json:"supplier" yaml:"supplier" mapstructure:"supplier"`
|
||||
Source string `json:"source" yaml:"source" mapstructure:"source"`
|
||||
BasePath string `yaml:"base-path" json:"base-path" mapstructure:"base-path"` // specify base path for all file paths
|
||||
File fileSource `json:"file" yaml:"file" mapstructure:"file"`
|
||||
Image imageSource `json:"image" yaml:"image" mapstructure:"image"`
|
||||
|
||||
@ -25,7 +25,6 @@ func BenchmarkImagePackageCatalogers(b *testing.B) {
|
||||
// get the source object for the image
|
||||
theSource, err := syft.GetSource(context.Background(), tarPath, syft.DefaultGetSourceConfig().WithSources("docker-archive"))
|
||||
require.NoError(b, err)
|
||||
|
||||
b.Cleanup(func() {
|
||||
require.NoError(b, theSource.Close())
|
||||
})
|
||||
@ -87,6 +86,8 @@ func TestPkgCoverageImage(t *testing.T) {
|
||||
definedPkgs.Remove(string(pkg.GithubActionWorkflowPkg))
|
||||
definedPkgs.Remove(string(pkg.TerraformPkg))
|
||||
definedPkgs.Remove(string(pkg.PhpPeclPkg)) // we have coverage for pear instead
|
||||
definedPkgs.Remove(string(pkg.CondaPkg))
|
||||
definedPkgs.Remove(string(pkg.ModelPkg))
|
||||
|
||||
var cases []testCase
|
||||
cases = append(cases, commonTestCases...)
|
||||
@ -159,7 +160,9 @@ func TestPkgCoverageDirectory(t *testing.T) {
|
||||
definedPkgs.Remove(string(pkg.LinuxKernelModulePkg))
|
||||
definedPkgs.Remove(string(pkg.Rpkg))
|
||||
definedPkgs.Remove(string(pkg.UnknownPkg))
|
||||
definedPkgs.Remove(string(pkg.CondaPkg))
|
||||
definedPkgs.Remove(string(pkg.PhpPeclPkg)) // this is covered as pear packages
|
||||
definedPkgs.Remove(string(pkg.ModelPkg))
|
||||
|
||||
// for directory scans we should not expect to see any of the following package types
|
||||
definedPkgs.Remove(string(pkg.KbPkg))
|
||||
|
||||
@ -78,7 +78,10 @@ func TestEncodeDecodeEncodeCycleComparison(t *testing.T) {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
for _, image := range images {
|
||||
originalSBOM, _ := catalogFixtureImage(t, image, source.SquashedScope)
|
||||
|
||||
// we need a way to inject supplier into this test
|
||||
// supplier is not available as part of the SBOM Config API since the flag
|
||||
// is used in conjunction with the SourceConfig which is injected into generateSBOM during scan
|
||||
originalSBOM.Source.Supplier = "anchore"
|
||||
f := encoders.GetByString(test.name)
|
||||
require.NotNil(t, f)
|
||||
|
||||
|
||||
@ -30,10 +30,10 @@ func TestPackageDeduplication(t *testing.T) {
|
||||
locationCount: map[string]int{
|
||||
"basesystem-11-13.el9": 5, // in all layers
|
||||
"curl-minimal-7.76.1-26.el9_3.2.0.1": 2, // base + wget layer
|
||||
"curl-minimal-7.76.1-31.el9": 3, // curl upgrade layer + all above layers
|
||||
"curl-minimal-7.76.1-31.el9_6.1": 3, // curl upgrade layer + all above layers
|
||||
"wget-1.21.1-8.el9_4": 4, // wget + all above layers
|
||||
"vsftpd-3.0.5-6.el9": 2, // vsftpd + all above layers
|
||||
"httpd-2.4.62-4.el9": 1, // last layer
|
||||
"httpd-2.4.62-4.el9_6.4": 1, // last layer
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -48,10 +48,10 @@ func TestPackageDeduplication(t *testing.T) {
|
||||
},
|
||||
locationCount: map[string]int{
|
||||
"basesystem-11-13.el9": 1,
|
||||
"curl-minimal-7.76.1-31.el9": 1, // upgrade
|
||||
"curl-minimal-7.76.1-31.el9_6.1": 1, // upgrade
|
||||
"wget-1.21.1-8.el9_4": 1,
|
||||
"vsftpd-3.0.5-6.el9": 1,
|
||||
"httpd-2.4.62-4.el9": 1,
|
||||
"httpd-2.4.62-4.el9_6.4": 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@ -7,16 +7,16 @@ FROM --platform=linux/amd64 rockylinux:9.3.20231119@sha256:d644d203142cd5b54ad2a
|
||||
# copying the RPM DB from each stage to a final stage in separate layers. This will result in a much smaller image.
|
||||
|
||||
FROM base AS stage1
|
||||
RUN dnf install -y wget
|
||||
RUN dnf install -y wget-1.21.1-8.el9_4
|
||||
|
||||
FROM stage1 AS stage2
|
||||
RUN dnf update -y curl-minimal
|
||||
RUN dnf update -y curl-minimal-7.76.1-31.el9_6.1
|
||||
|
||||
FROM stage2 AS stage3
|
||||
RUN dnf install -y vsftpd
|
||||
RUN dnf install -y vsftpd-3.0.5-6.el9
|
||||
|
||||
FROM stage3 AS stage4
|
||||
RUN dnf install -y httpd
|
||||
RUN dnf install -y httpd-2.4.62-4.el9_6.4
|
||||
|
||||
FROM scratch
|
||||
|
||||
|
||||
@ -38,11 +38,11 @@ func catalogFixtureImageWithConfig(t *testing.T, fixtureImageName string, cfg *s
|
||||
// get the source to build an SBOM against
|
||||
theSource, err := syft.GetSource(context.Background(), tarPath, syft.DefaultGetSourceConfig().WithSources("docker-archive"))
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, theSource.Close())
|
||||
})
|
||||
|
||||
// build the SBOM
|
||||
s, err := syft.CreateSBOM(context.Background(), theSource, cfg)
|
||||
|
||||
require.NoError(t, err)
|
||||
@ -66,7 +66,7 @@ func catalogDirectory(t *testing.T, dir string, catalogerSelection ...string) (s
|
||||
func catalogDirectoryWithConfig(t *testing.T, dir string, cfg *syft.CreateSBOMConfig) (sbom.SBOM, source.Source) {
|
||||
cfg.CatalogerSelection = cfg.CatalogerSelection.WithDefaults(pkgcataloging.DirectoryTag)
|
||||
|
||||
// get the source to build an sbom against
|
||||
// get the source to build an SBOM against
|
||||
theSource, err := syft.GetSource(context.Background(), dir, syft.DefaultGetSourceConfig().WithSources("dir"))
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
|
||||
@ -6,7 +6,8 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
"go.yaml.in/yaml/v3"
|
||||
_ "modernc.org/sqlite" // required for rpmdb and other features
|
||||
|
||||
"github.com/anchore/syft/syft"
|
||||
"github.com/anchore/syft/syft/cataloging"
|
||||
@ -22,6 +23,7 @@ const defaultImage = "alpine:3.19"
|
||||
func main() {
|
||||
// automagically get a source.Source for arbitrary string input
|
||||
src := getSource(imageReference())
|
||||
defer src.Close()
|
||||
|
||||
// will catalog the given source and return a SBOM keeping in mind several configurable options
|
||||
sbom := getSBOM(src)
|
||||
@ -45,7 +47,6 @@ func getSource(input string) source.Source {
|
||||
fmt.Println("detecting source type for input:", input, "...")
|
||||
|
||||
src, err := syft.GetSource(context.Background(), input, nil)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@ -5,6 +5,8 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
_ "modernc.org/sqlite" // required for rpmdb and other features
|
||||
|
||||
"github.com/anchore/syft/syft"
|
||||
"github.com/anchore/syft/syft/format"
|
||||
"github.com/anchore/syft/syft/format/syftjson"
|
||||
@ -17,6 +19,7 @@ const defaultImage = "alpine:3.19"
|
||||
func main() {
|
||||
// automagically get a source.Source for arbitrary string input
|
||||
src := getSource(imageReference())
|
||||
defer src.Close()
|
||||
|
||||
// catalog the given source and return a SBOM
|
||||
sbom := getSBOM(src)
|
||||
@ -38,7 +41,6 @@ func imageReference() string {
|
||||
|
||||
func getSource(input string) source.Source {
|
||||
src, err := syft.GetSource(context.Background(), input, nil)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@ -5,6 +5,8 @@ import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
|
||||
_ "modernc.org/sqlite" // required for rpmdb and other features
|
||||
|
||||
"github.com/anchore/syft/syft"
|
||||
"github.com/anchore/syft/syft/cataloging"
|
||||
"github.com/anchore/syft/syft/cataloging/pkgcataloging"
|
||||
@ -17,6 +19,7 @@ const defaultImage = "alpine:3.19"
|
||||
func main() {
|
||||
// automagically get a source.Source for arbitrary string input
|
||||
src := getSource(imageReference())
|
||||
defer src.Close()
|
||||
|
||||
// catalog the given source and return a SBOM
|
||||
// let's explicitly use catalogers that are:
|
||||
@ -42,7 +45,6 @@ func imageReference() string {
|
||||
|
||||
func getSource(input string) source.Source {
|
||||
src, err := syft.GetSource(context.Background(), input, nil)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@ -4,6 +4,8 @@ import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
_ "modernc.org/sqlite" // required for rpmdb and other features
|
||||
|
||||
"github.com/anchore/syft/syft"
|
||||
"github.com/anchore/syft/syft/format/syftjson"
|
||||
)
|
||||
@ -13,6 +15,7 @@ func main() {
|
||||
image := "alpine:3.19"
|
||||
|
||||
src, _ := syft.GetSource(context.Background(), image, syft.DefaultGetSourceConfig().WithSources("registry"))
|
||||
defer src.Close()
|
||||
|
||||
sbom, _ := syft.CreateSBOM(context.Background(), src, syft.DefaultCreateSBOMConfig())
|
||||
|
||||
|
||||
156
go.mod
156
go.mod
@ -4,18 +4,17 @@ go 1.24.1
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.5.0
|
||||
github.com/CycloneDX/cyclonedx-go v0.9.2
|
||||
github.com/CycloneDX/cyclonedx-go v0.9.3
|
||||
github.com/Masterminds/semver/v3 v3.4.0
|
||||
github.com/Masterminds/sprig/v3 v3.3.0
|
||||
github.com/OneOfOne/xxhash v1.2.8
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
|
||||
github.com/acobaugh/osrelease v0.1.0
|
||||
github.com/adrg/xdg v0.5.3
|
||||
github.com/anchore/archiver/v3 v3.5.3-0.20241210171143-5b1d8d1c7c51
|
||||
github.com/anchore/bubbly v0.0.0-20231115134915-def0aba654a9
|
||||
github.com/anchore/clio v0.0.0-20250319180342-2cfe4b0cb716
|
||||
github.com/anchore/fangs v0.0.0-20250319222917-446a1e748ec2
|
||||
github.com/anchore/go-collections v0.0.0-20240216171411-9321230ce537
|
||||
github.com/anchore/go-collections v0.0.0-20251016125210-a3c352120e8c
|
||||
github.com/anchore/go-homedir v0.0.0-20250319154043-c29668562e4d
|
||||
github.com/anchore/go-logger v0.0.0-20250318195838-07ae343dd722
|
||||
github.com/anchore/go-macholibre v0.0.0-20220308212642-53e6d0aaf6fb
|
||||
@ -24,51 +23,51 @@ require (
|
||||
github.com/anchore/go-testutils v0.0.0-20200925183923-d5f45b0d3c04
|
||||
github.com/anchore/go-version v1.2.2-0.20200701162849-18adb9c92b9b
|
||||
github.com/anchore/packageurl-go v0.1.1-0.20250220190351-d62adb6e1115
|
||||
github.com/anchore/stereoscope v0.1.8
|
||||
github.com/anchore/stereoscope v0.1.12
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
|
||||
github.com/aquasecurity/go-pep440-version v0.0.1
|
||||
github.com/bitnami/go-version v0.0.0-20250131085805-b1f57a8634ef
|
||||
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb
|
||||
github.com/bmatcuk/doublestar/v4 v4.9.1
|
||||
github.com/charmbracelet/bubbles v0.21.0
|
||||
github.com/charmbracelet/bubbletea v1.3.6
|
||||
github.com/charmbracelet/bubbletea v1.3.10
|
||||
github.com/charmbracelet/lipgloss v1.1.0
|
||||
github.com/dave/jennifer v1.7.1
|
||||
github.com/deitch/magic v0.0.0-20230404182410-1ff89d7342da
|
||||
github.com/diskfs/go-diskfs v1.6.1-0.20250601133945-2af1c7ece24c
|
||||
github.com/diskfs/go-diskfs v1.7.0
|
||||
github.com/distribution/reference v0.6.0
|
||||
github.com/dustin/go-humanize v1.0.1
|
||||
github.com/elliotchance/phpserialize v1.4.0
|
||||
github.com/facebookincubator/nvdtools v0.1.5
|
||||
github.com/github/go-spdx/v2 v2.3.3
|
||||
github.com/gkampitakis/go-snaps v0.5.14
|
||||
github.com/github/go-spdx/v2 v2.3.4
|
||||
github.com/gkampitakis/go-snaps v0.5.15
|
||||
github.com/go-git/go-billy/v5 v5.6.2
|
||||
github.com/go-git/go-git/v5 v5.16.2
|
||||
github.com/go-git/go-git/v5 v5.16.3
|
||||
github.com/go-test/deep v1.1.1
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0
|
||||
github.com/gohugoio/hashstructure v0.5.0
|
||||
github.com/gohugoio/hashstructure v0.6.0
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/google/go-containerregistry v0.20.6
|
||||
github.com/google/licensecheck v0.3.1
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gookit/color v1.5.4
|
||||
github.com/gookit/color v1.6.0
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2
|
||||
github.com/hashicorp/go-getter v1.7.8
|
||||
github.com/hashicorp/go-getter v1.8.3
|
||||
github.com/hashicorp/go-multierror v1.1.1
|
||||
github.com/hashicorp/hcl/v2 v2.24.0
|
||||
github.com/iancoleman/strcase v0.3.0
|
||||
github.com/invopop/jsonschema v0.7.0
|
||||
github.com/jedib0t/go-pretty/v6 v6.6.8
|
||||
github.com/jedib0t/go-pretty/v6 v6.7.1
|
||||
github.com/jinzhu/copier v0.4.0
|
||||
github.com/kastenhq/goversion v0.0.0-20230811215019-93b2f8823953
|
||||
github.com/magiconair/properties v1.8.10
|
||||
github.com/mholt/archives v0.1.3
|
||||
github.com/mholt/archives v0.1.5
|
||||
github.com/moby/sys/mountinfo v0.7.2
|
||||
github.com/nix-community/go-nix v0.0.0-20250101154619-4bdde671e0a1
|
||||
github.com/olekukonko/tablewriter v1.0.9
|
||||
github.com/olekukonko/tablewriter v1.1.1
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/pelletier/go-toml v1.9.5
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.22
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.23
|
||||
github.com/rust-secure-code/go-rustaudit v0.0.0-20250226111315-e20ec32e963c
|
||||
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
|
||||
github.com/sanity-io/litter v1.5.8
|
||||
@ -78,21 +77,21 @@ require (
|
||||
github.com/sergi/go-diff v1.4.0
|
||||
github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb
|
||||
github.com/spdx/tools-golang v0.5.5
|
||||
github.com/spf13/afero v1.14.0
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/vbatts/go-mtree v0.5.4
|
||||
github.com/spf13/afero v1.15.0
|
||||
github.com/spf13/cobra v1.10.1
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/vbatts/go-mtree v0.6.0
|
||||
github.com/vifraa/gopom v1.0.0
|
||||
github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651
|
||||
github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0
|
||||
github.com/xeipuuv/gojsonschema v1.2.0
|
||||
github.com/zyedidia/generic v1.2.2-0.20230320175451-4410d2372cb1
|
||||
go.uber.org/goleak v1.3.0
|
||||
go.yaml.in/yaml/v3 v3.0.4
|
||||
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
|
||||
golang.org/x/mod v0.27.0
|
||||
golang.org/x/net v0.43.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
modernc.org/sqlite v1.38.2
|
||||
golang.org/x/mod v0.30.0
|
||||
golang.org/x/net v0.46.0
|
||||
modernc.org/sqlite v1.40.0
|
||||
)
|
||||
|
||||
require (
@ -109,30 +108,29 @@ require (
	github.com/Masterminds/goutils v1.1.1 // indirect
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/Microsoft/hcsshim v0.11.7 // indirect
	github.com/ProtonMail/go-crypto v1.2.0 // indirect
	github.com/STARRY-S/zip v0.2.1 // indirect
	github.com/ProtonMail/go-crypto v1.3.0 // indirect
	github.com/STARRY-S/zip v0.2.3 // indirect
	github.com/agext/levenshtein v1.2.1 // indirect
	github.com/anchore/go-lzo v0.1.0 // indirect
	github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
	github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 // indirect
	github.com/andybalholm/brotli v1.2.0 // indirect
	github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
	github.com/aquasecurity/go-version v0.0.1 // indirect
	github.com/atotto/clipboard v0.1.4 // indirect
	github.com/aws/aws-sdk-go v1.44.122 // indirect
	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
	github.com/becheran/wildmatch-go v1.0.0 // indirect
	github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
	github.com/bodgit/plumbing v1.3.0 // indirect
	github.com/bodgit/sevenzip v1.6.0 // indirect
	github.com/bodgit/sevenzip v1.6.1 // indirect
	github.com/bodgit/windows v1.0.1 // indirect
	github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
	github.com/charmbracelet/harmonica v0.2.0 // indirect
	github.com/charmbracelet/x/ansi v0.9.3 // indirect
	github.com/charmbracelet/x/ansi v0.10.1 // indirect
	github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
	github.com/charmbracelet/x/term v0.2.1 // indirect
	github.com/cloudflare/circl v1.6.1 // indirect
	github.com/containerd/cgroups v1.1.0 // indirect
	github.com/containerd/containerd v1.7.27 // indirect
	github.com/containerd/containerd v1.7.29 // indirect
	github.com/containerd/containerd/api v1.8.0 // indirect
	github.com/containerd/continuity v0.4.4 // indirect
	github.com/containerd/errdefs v1.0.0 // indirect
@ -143,19 +141,19 @@ require (
	github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
	github.com/containerd/ttrpc v1.2.7 // indirect
	github.com/containerd/typeurl/v2 v2.2.0 // indirect
	github.com/cyphar/filepath-securejoin v0.4.1 // indirect
	github.com/cyphar/filepath-securejoin v0.6.0 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/docker/cli v28.3.2+incompatible // indirect
	github.com/docker/cli v28.5.1+incompatible // indirect
	github.com/docker/distribution v2.8.3+incompatible // indirect
	github.com/docker/docker v28.3.3+incompatible // indirect
	github.com/docker/docker v28.5.1+incompatible // indirect
	github.com/docker/docker-credential-helpers v0.9.3 // indirect
	github.com/docker/go-connections v0.5.0 // indirect
	github.com/docker/go-connections v0.6.0 // indirect
	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
	github.com/emirpasic/gods v1.18.1 // indirect
	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
	github.com/fatih/color v1.17.0 // indirect
	github.com/fatih/color v1.18.0 // indirect
	github.com/felixge/fgprof v0.9.5 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fsnotify/fsnotify v1.8.0 // indirect
@ -169,20 +167,17 @@ require (
	github.com/goccy/go-yaml v1.18.0
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
	github.com/golang/snappy v0.0.4 // indirect
	github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e // indirect
	github.com/google/s2a-go v0.1.8 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
	github.com/googleapis/gax-go/v2 v2.13.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-safetemp v1.0.0 // indirect
	github.com/hashicorp/go-version v1.6.0 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/huandu/xstrings v1.5.0 // indirect
	github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 // indirect
	github.com/iancoleman/orderedmap v0.3.0
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
	github.com/jmespath/go-jmespath v0.4.0 // indirect
	github.com/kevinburke/ssh_config v1.2.0 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/klauspost/pgzip v1.2.6 // indirect
@ -191,16 +186,15 @@ require (
	github.com/logrusorgru/aurora v2.0.3+incompatible // indirect
	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
	github.com/maruel/natural v1.1.1 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/mattn/go-runewidth v0.0.19 // indirect
	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
	github.com/mikelolasagasti/xz v1.0.1 // indirect
	github.com/minio/minlz v1.0.0 // indirect
	github.com/minio/minlz v1.0.1 // indirect
	github.com/mitchellh/copystructure v1.2.0 // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
	github.com/mitchellh/go-testing-interface v1.14.1 // indirect
	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
	github.com/mitchellh/reflectwalk v1.0.2 // indirect
	github.com/moby/docker-image-spec v1.3.1 // indirect
@ -213,13 +207,9 @@ require (
	github.com/muesli/cancelreader v0.2.2 // indirect
	github.com/muesli/termenv v0.16.0 // indirect
	github.com/ncruces/go-strftime v0.1.9 // indirect
	github.com/nwaples/rardecode v1.1.3 // indirect
	github.com/nwaples/rardecode/v2 v2.1.0 // indirect
	github.com/olekukonko/errors v1.1.0 // indirect
	github.com/olekukonko/ll v0.0.9 // indirect
	github.com/opencontainers/image-spec v1.1.1 // indirect
	github.com/opencontainers/runtime-spec v1.1.0 // indirect
	github.com/opencontainers/selinux v1.11.0 // indirect
	github.com/opencontainers/selinux v1.13.0 // indirect
	github.com/pborman/indent v1.2.1 // indirect
	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
	github.com/pierrec/lz4/v4 v4.1.22 // indirect
@ -236,21 +226,21 @@ require (
	github.com/shopspring/decimal v1.4.0 // indirect
	github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
	github.com/skeema/knownhosts v1.3.1 // indirect
	github.com/sorairolake/lzip-go v0.3.5 // indirect
	github.com/sorairolake/lzip-go v0.3.8 // indirect
	github.com/sourcegraph/conc v0.3.0 // indirect
	github.com/spf13/cast v1.7.1 // indirect
	github.com/spf13/pflag v1.0.6 // indirect
	github.com/spf13/pflag v1.0.9 // indirect
	github.com/spf13/viper v1.20.0 // indirect
	github.com/stretchr/objx v0.5.2 // indirect
	github.com/subosito/gotenv v1.6.0 // indirect
	github.com/sylabs/sif/v2 v2.21.1 // indirect
	github.com/sylabs/sif/v2 v2.22.0 // indirect
	github.com/sylabs/squashfs v1.0.6 // indirect
	github.com/therootcompany/xz v1.0.1 // indirect
	github.com/tidwall/gjson v1.18.0 // indirect
	github.com/tidwall/match v1.1.1 // indirect
	github.com/tidwall/pretty v1.2.1 // indirect
	github.com/tidwall/sjson v1.2.5 // indirect
	github.com/ulikunitz/xz v0.5.12 // indirect
	github.com/ulikunitz/xz v0.5.15 // indirect
	github.com/vbatts/tar-split v0.12.1 // indirect
	github.com/xanzy/ssh-agent v0.3.3 // indirect
	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
@ -268,27 +258,69 @@ require (
	go.uber.org/atomic v1.9.0 // indirect
	go.uber.org/multierr v1.9.0 // indirect
	go4.org v0.0.0-20230225012048-214862532bf5 // indirect
	golang.org/x/crypto v0.41.0 // indirect
	golang.org/x/crypto v0.43.0 // indirect
	golang.org/x/oauth2 v0.30.0 // indirect
	golang.org/x/sync v0.16.0 // indirect
	golang.org/x/sys v0.35.0 // indirect
	golang.org/x/term v0.34.0 // indirect
	golang.org/x/text v0.28.0 // indirect
	golang.org/x/time v0.7.0 // indirect
	golang.org/x/tools v0.35.0 // indirect
	golang.org/x/sync v0.17.0 // indirect
	golang.org/x/sys v0.37.0 // indirect
	golang.org/x/term v0.36.0 // indirect
	golang.org/x/text v0.30.0 // indirect
	golang.org/x/time v0.14.0
	golang.org/x/tools v0.38.0
	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
	google.golang.org/api v0.203.0 // indirect
	google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20241113202542-65e8d215514f // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
	google.golang.org/grpc v1.67.3 // indirect
	google.golang.org/protobuf v1.36.4 // indirect
	google.golang.org/protobuf v1.36.6 // indirect
	gopkg.in/warnings.v0 v0.1.2 // indirect
	modernc.org/libc v1.66.3 // indirect
	gopkg.in/yaml.v3 v3.0.1
	modernc.org/libc v1.66.10 // indirect
	modernc.org/mathutil v1.7.1 // indirect
	modernc.org/memory v1.11.0 // indirect
)

require (
	github.com/cespare/xxhash/v2 v2.3.0
	github.com/gpustack/gguf-parser-go v0.22.1
)

require (
	cyphar.com/go-pathrs v0.2.1 // indirect
	github.com/aws/aws-sdk-go-v2 v1.36.5 // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
	github.com/aws/aws-sdk-go-v2/config v1.29.17 // indirect
	github.com/aws/aws-sdk-go-v2/credentials v1.17.70 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
	github.com/aws/aws-sdk-go-v2/service/s3 v1.80.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect
	github.com/aws/smithy-go v1.22.4 // indirect
	github.com/clipperhouse/displaywidth v0.3.1 // indirect
	github.com/clipperhouse/stringish v0.1.1 // indirect
	github.com/clipperhouse/uax29/v2 v2.2.0 // indirect
	github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65 // indirect
	github.com/henvic/httpretty v0.1.4 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/nwaples/rardecode/v2 v2.2.0 // indirect
	github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 // indirect
	github.com/olekukonko/errors v1.1.0 // indirect
	github.com/olekukonko/ll v0.1.2 // indirect
	github.com/smallnest/ringbuffer v0.0.0-20241116012123-461381446e3d // indirect
	gonum.org/v1/gonum v0.15.1 // indirect
)

retract (
	v1.25.0 // published with a replace directive (confusing for API users)
	v0.53.2

internal/capabilities/pkgtestobservation/model.go (new file, 46 lines)
@ -0,0 +1,46 @@
package pkgtestobservation

import "time"

// Observations represents capability observations during testing
type Observations struct {
	License       bool         `json:"license"`
	Relationships Relationship `json:"relationships"`
	FileListing   Count        `json:"file_listing"`
	FileDigests   Count        `json:"file_digests"`
	IntegrityHash Count        `json:"integrity_hash"`
}

// Relationship tracks dependency relationship observations
type Relationship struct {
	Found bool `json:"found"`
	Count int  `json:"count"`
}

// Count tracks whether a capability was found and how many times
type Count struct {
	Found bool `json:"found"`
	Count int  `json:"count"`
}

// Test is the root structure for test-observations.json
type Test struct {
	Package    string                `json:"package"`
	UpdatedAt  time.Time             `json:"updated_at"`
	Catalogers map[string]*Cataloger `json:"catalogers"`
	Parsers    map[string]*Parser    `json:"parsers"`
}

// Parser captures all observations for a parser
type Parser struct {
	MetadataTypes []string     `json:"metadata_types"`
	PackageTypes  []string     `json:"package_types"`
	Observations  Observations `json:"observations"`
}

// Cataloger captures all observations for a cataloger
type Cataloger struct {
	MetadataTypes []string     `json:"metadata_types"`
	PackageTypes  []string     `json:"package_types"`
	Observations  Observations `json:"observations"`
}
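For orientation, a minimal sketch of producing a test-observations.json document from these types; the package and cataloger names below are hypothetical examples, not values taken from the diff, and the encoding/json, fmt, and time imports are assumed:

// sketch: marshal a Test value to see the JSON shape this model defines
func observationsExample() ([]byte, error) {
	obs := pkgtestobservation.Test{
		Package:   "syft/pkg/cataloger/python", // hypothetical package name
		UpdatedAt: time.Now(),
		Catalogers: map[string]*pkgtestobservation.Cataloger{
			"python-package-cataloger": { // hypothetical cataloger name
				MetadataTypes: []string{"PythonPackage"}, // hypothetical metadata type
				PackageTypes:  []string{"python"},
				Observations: pkgtestobservation.Observations{
					License:       true,
					Relationships: pkgtestobservation.Relationship{Found: true, Count: 3},
					FileListing:   pkgtestobservation.Count{Found: true, Count: 12},
				},
			},
		},
	}
	// produces: {"package":"...","updated_at":"...","catalogers":{...},"parsers":null}
	return json.MarshalIndent(obs, "", "  ")
}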
@ -3,5 +3,9 @@ package internal
const (
	// JSONSchemaVersion is the current schema version output by the JSON encoder
	// This is roughly following the "SchemaVer" guidelines for versioning the JSON schema. Please see schema/json/README.md for details on how to increment.
	JSONSchemaVersion = "16.0.36"
	JSONSchemaVersion = "16.1.0"

	// Changelog
	// 16.1.0 - reformulated the python pdm fields (added "URL" and removed the unused "path" field).
)

@ -1,17 +1,40 @@
package file

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/bmatcuk/doublestar/v4"
	"github.com/mholt/archives"

	"github.com/anchore/archiver/v3"
	"github.com/anchore/syft/internal"
)

// TraverseFilesInTar enumerates all paths stored within a tar archive using the visitor pattern.
func TraverseFilesInTar(ctx context.Context, archivePath string, visitor archives.FileHandler) error {
	tarReader, err := os.Open(archivePath)
	if err != nil {
		return fmt.Errorf("unable to open tar archive (%s): %w", archivePath, err)
	}
	defer internal.CloseAndLogError(tarReader, archivePath)

	format, _, err := archives.Identify(ctx, archivePath, nil)
	if err != nil {
		return fmt.Errorf("failed to identify tar compression format: %w", err)
	}

	extractor, ok := format.(archives.Extractor)
	if !ok {
		return fmt.Errorf("file format does not support extraction: %s", archivePath)
	}

	return extractor.Extract(ctx, tarReader, visitor)
}

// ExtractGlobsFromTarToUniqueTempFile extracts paths matching the given globs within the given archive to a temporary directory, returning file openers for each file extracted.
func ExtractGlobsFromTarToUniqueTempFile(archivePath, dir string, globs ...string) (map[string]Opener, error) {
func ExtractGlobsFromTarToUniqueTempFile(ctx context.Context, archivePath, dir string, globs ...string) (map[string]Opener, error) {
	results := make(map[string]Opener)

	// don't allow for full traversal, only select traversal from given paths
@ -19,9 +42,7 @@ func ExtractGlobsFromTarToUniqueTempFile(archivePath, dir string, globs ...strin
		return results, nil
	}

	visitor := func(file archiver.File) error {
		defer file.Close()

	visitor := func(_ context.Context, file archives.FileInfo) error {
		// ignore directories
		if file.IsDir() {
			return nil
@ -43,7 +64,13 @@ func ExtractGlobsFromTarToUniqueTempFile(archivePath, dir string, globs ...strin
		// provides a ReadCloser. It is up to the caller to handle closing the file explicitly.
		defer tempFile.Close()

		if err := safeCopy(tempFile, file.ReadCloser); err != nil {
		packedFile, err := file.Open()
		if err != nil {
			return fmt.Errorf("unable to read file=%q from tar=%q: %w", file.NameInArchive, archivePath, err)
		}
		defer internal.CloseAndLogError(packedFile, archivePath)

		if err := safeCopy(tempFile, packedFile); err != nil {
			return fmt.Errorf("unable to copy source=%q for tar=%q: %w", file.Name(), archivePath, err)
		}

@ -52,7 +79,7 @@ func ExtractGlobsFromTarToUniqueTempFile(archivePath, dir string, globs ...strin
		return nil
	}

	return results, archiver.Walk(archivePath, visitor)
	return results, TraverseFilesInTar(ctx, archivePath, visitor)
}

func matchesAnyGlob(name string, globs ...string) bool {
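For orientation, a sketch of driving the new visitor-based API from within the package; the tarball path is a hypothetical example, and the context, fmt, and mholt/archives imports are assumed:

// sketch: print every regular file stored in a (possibly compressed) tarball
func printTarEntries(ctx context.Context, archivePath string) error {
	visitor := func(_ context.Context, f archives.FileInfo) error {
		if f.IsDir() {
			return nil // skip directories, as the extraction visitors above do
		}
		fmt.Println(f.NameInArchive)
		return nil
	}
	// e.g. printTarEntries(context.Background(), "/tmp/example.tar.gz")
	return TraverseFilesInTar(ctx, archivePath, visitor)
}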
@ -1,10 +1,12 @@
package file

import (
	"context"
	"os"
	"sort"
	"strings"

	"github.com/mholt/archives"
	"github.com/scylladb/go-set/strset"

	"github.com/anchore/syft/internal/log"
@ -14,22 +16,25 @@ import (
type ZipFileManifest map[string]os.FileInfo

// NewZipFileManifest creates and returns a new ZipFileManifest populated with path and metadata from the given zip archive path.
func NewZipFileManifest(archivePath string) (ZipFileManifest, error) {
	zipReader, err := OpenZip(archivePath)
func NewZipFileManifest(ctx context.Context, archivePath string) (ZipFileManifest, error) {
	zipReader, err := os.Open(archivePath)
	manifest := make(ZipFileManifest)
	if err != nil {
		log.Debugf("unable to open zip archive (%s): %v", archivePath, err)
		return manifest, err
	}
	defer func() {
		err = zipReader.Close()
		if err != nil {
		if err = zipReader.Close(); err != nil {
			log.Debugf("unable to close zip archive (%s): %+v", archivePath, err)
		}
	}()

	for _, file := range zipReader.File {
		manifest.Add(file.Name, file.FileInfo())
	err = archives.Zip{}.Extract(ctx, zipReader, func(_ context.Context, file archives.FileInfo) error {
		manifest.Add(file.NameInArchive, file.FileInfo)
		return nil
	})
	if err != nil {
		return manifest, err
	}
	return manifest, nil
}

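Callers now thread a context through manifest construction; a small usage sketch (the archive path is hypothetical, and the context and fmt imports are assumed):

// sketch: build a manifest and list each stored path with its size
func listZipEntries(ctx context.Context, archivePath string) error {
	manifest, err := NewZipFileManifest(ctx, archivePath)
	if err != nil {
		return err
	}
	for p, info := range manifest { // ZipFileManifest is a map[string]os.FileInfo
		fmt.Printf("%s (%d bytes)\n", p, info.Size())
	}
	return nil
}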
@ -4,6 +4,7 @@
package file

import (
	"context"
	"encoding/json"
	"os"
	"path"
@ -24,7 +25,7 @@ func TestNewZipFileManifest(t *testing.T) {

	archiveFilePath := setupZipFileTest(t, sourceDirPath, false)

	actual, err := NewZipFileManifest(archiveFilePath)
	actual, err := NewZipFileManifest(context.Background(), archiveFilePath)
	if err != nil {
		t.Fatalf("unable to extract from unzip archive: %+v", err)
	}
@ -59,7 +60,7 @@ func TestNewZip64FileManifest(t *testing.T) {
	sourceDirPath := path.Join(cwd, "test-fixtures", "zip-source")
	archiveFilePath := setupZipFileTest(t, sourceDirPath, true)

	actual, err := NewZipFileManifest(archiveFilePath)
	actual, err := NewZipFileManifest(context.Background(), archiveFilePath)
	if err != nil {
		t.Fatalf("unable to extract from unzip archive: %+v", err)
	}
@ -99,7 +100,7 @@ func TestZipFileManifest_GlobMatch(t *testing.T) {

	archiveFilePath := setupZipFileTest(t, sourceDirPath, false)

	z, err := NewZipFileManifest(archiveFilePath)
	z, err := NewZipFileManifest(context.Background(), archiveFilePath)
	if err != nil {
		t.Fatalf("unable to extract from unzip archive: %+v", err)
	}

@ -1,13 +1,15 @@
package file

import (
	"archive/zip"
	"bytes"
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/mholt/archives"

	"github.com/anchore/syft/internal/log"
)

@ -25,7 +27,7 @@ type errZipSlipDetected struct {
}

func (e *errZipSlipDetected) Error() string {
	return fmt.Sprintf("paths are not allowed to resolve outside of the root prefix (%q). Destination: %q", e.Prefix, e.JoinArgs)
	return fmt.Sprintf("path traversal detected: paths are not allowed to resolve outside of the root prefix (%q). Destination: %q", e.Prefix, e.JoinArgs)
}

type zipTraversalRequest map[string]struct{}
@ -39,38 +41,34 @@ func newZipTraverseRequest(paths ...string) zipTraversalRequest {
}

// TraverseFilesInZip enumerates all paths stored within a zip archive using the visitor pattern.
func TraverseFilesInZip(archivePath string, visitor func(*zip.File) error, paths ...string) error {
func TraverseFilesInZip(ctx context.Context, archivePath string, visitor archives.FileHandler, paths ...string) error {
	request := newZipTraverseRequest(paths...)

	zipReader, err := OpenZip(archivePath)
	zipReader, err := os.Open(archivePath)
	if err != nil {
		return fmt.Errorf("unable to open zip archive (%s): %w", archivePath, err)
	}
	defer func() {
		err = zipReader.Close()
		if err != nil {
		if err := zipReader.Close(); err != nil {
			log.Errorf("unable to close zip archive (%s): %+v", archivePath, err)
		}
	}()

	for _, file := range zipReader.File {
	return archives.Zip{}.Extract(ctx, zipReader, func(ctx context.Context, file archives.FileInfo) error {
		// if no paths are given then assume that all files should be traversed
		if len(paths) > 0 {
			if _, ok := request[file.Name]; !ok {
			if _, ok := request[file.NameInArchive]; !ok {
				// this file path is not of interest
				continue
				return nil
			}
		}

		if err = visitor(file); err != nil {
			return err
		}
	}
	return nil
		return visitor(ctx, file)
	})
}

// ExtractFromZipToUniqueTempFile extracts select paths for the given archive to a temporary directory, returning file openers for each file extracted.
func ExtractFromZipToUniqueTempFile(archivePath, dir string, paths ...string) (map[string]Opener, error) {
func ExtractFromZipToUniqueTempFile(ctx context.Context, archivePath, dir string, paths ...string) (map[string]Opener, error) {
	results := make(map[string]Opener)

	// don't allow for full traversal, only select traversal from given paths
@ -78,9 +76,8 @@ func ExtractFromZipToUniqueTempFile(archivePath, dir string, paths ...string) (m
		return results, nil
	}

	visitor := func(file *zip.File) error {
		tempfilePrefix := filepath.Base(filepath.Clean(file.Name)) + "-"

	visitor := func(_ context.Context, file archives.FileInfo) error {
		tempfilePrefix := filepath.Base(filepath.Clean(file.NameInArchive)) + "-"
		tempFile, err := os.CreateTemp(dir, tempfilePrefix)
		if err != nil {
			return fmt.Errorf("unable to create temp file: %w", err)
@ -92,33 +89,32 @@ func ExtractFromZipToUniqueTempFile(archivePath, dir string, paths ...string) (m

		zippedFile, err := file.Open()
		if err != nil {
			return fmt.Errorf("unable to read file=%q from zip=%q: %w", file.Name, archivePath, err)
			return fmt.Errorf("unable to read file=%q from zip=%q: %w", file.NameInArchive, archivePath, err)
		}
		defer func() {
			err := zippedFile.Close()
			if err != nil {
				log.Errorf("unable to close source file=%q from zip=%q: %+v", file.Name, archivePath, err)
			if err := zippedFile.Close(); err != nil {
				log.Errorf("unable to close source file=%q from zip=%q: %+v", file.NameInArchive, archivePath, err)
			}
		}()

		if file.FileInfo().IsDir() {
			return fmt.Errorf("unable to extract directories, only files: %s", file.Name)
		if file.IsDir() {
			return fmt.Errorf("unable to extract directories, only files: %s", file.NameInArchive)
		}

		if err := safeCopy(tempFile, zippedFile); err != nil {
			return fmt.Errorf("unable to copy source=%q for zip=%q: %w", file.Name, archivePath, err)
			return fmt.Errorf("unable to copy source=%q for zip=%q: %w", file.NameInArchive, archivePath, err)
		}

		results[file.Name] = Opener{path: tempFile.Name()}
		results[file.NameInArchive] = Opener{path: tempFile.Name()}

		return nil
	}

	return results, TraverseFilesInZip(archivePath, visitor, paths...)
	return results, TraverseFilesInZip(ctx, archivePath, visitor, paths...)
}

// ContentsFromZip extracts select paths for the given archive and returns a set of string contents for each path.
func ContentsFromZip(archivePath string, paths ...string) (map[string]string, error) {
func ContentsFromZip(ctx context.Context, archivePath string, paths ...string) (map[string]string, error) {
	results := make(map[string]string)

	// don't allow for full traversal, only select traversal from given paths
@ -126,37 +122,38 @@ func ContentsFromZip(archivePath string, paths ...string) (map[string]string, er
		return results, nil
	}

	visitor := func(file *zip.File) error {
	visitor := func(_ context.Context, file archives.FileInfo) error {
		zippedFile, err := file.Open()
		if err != nil {
			return fmt.Errorf("unable to read file=%q from zip=%q: %w", file.Name, archivePath, err)
			return fmt.Errorf("unable to read file=%q from zip=%q: %w", file.NameInArchive, archivePath, err)
		}
		defer func() {
			if err := zippedFile.Close(); err != nil {
				log.Errorf("unable to close source file=%q from zip=%q: %+v", file.NameInArchive, archivePath, err)
			}
		}()

		if file.FileInfo().IsDir() {
			return fmt.Errorf("unable to extract directories, only files: %s", file.Name)
		if file.IsDir() {
			return fmt.Errorf("unable to extract directories, only files: %s", file.NameInArchive)
		}

		var buffer bytes.Buffer
		if err := safeCopy(&buffer, zippedFile); err != nil {
			return fmt.Errorf("unable to copy source=%q for zip=%q: %w", file.Name, archivePath, err)
			return fmt.Errorf("unable to copy source=%q for zip=%q: %w", file.NameInArchive, archivePath, err)
		}

		results[file.Name] = buffer.String()
		results[file.NameInArchive] = buffer.String()

		err = zippedFile.Close()
		if err != nil {
			return fmt.Errorf("unable to close source file=%q from zip=%q: %w", file.Name, archivePath, err)
		}
		return nil
	}

	return results, TraverseFilesInZip(archivePath, visitor, paths...)
	return results, TraverseFilesInZip(ctx, archivePath, visitor, paths...)
}

// UnzipToDir extracts a zip archive to a target directory.
func UnzipToDir(archivePath, targetDir string) error {
	visitor := func(file *zip.File) error {
		joinedPath, err := safeJoin(targetDir, file.Name)
func UnzipToDir(ctx context.Context, archivePath, targetDir string) error {
	visitor := func(_ context.Context, file archives.FileInfo) error {
		joinedPath, err := SafeJoin(targetDir, file.NameInArchive)
		if err != nil {
			return err
		}
@ -164,11 +161,11 @@ func UnzipToDir(archivePath, targetDir string) error {
		return extractSingleFile(file, joinedPath, archivePath)
	}

	return TraverseFilesInZip(archivePath, visitor)
	return TraverseFilesInZip(ctx, archivePath, visitor)
}

// safeJoin ensures that any destinations do not resolve to a path above the prefix path.
func safeJoin(prefix string, dest ...string) (string, error) {
// SafeJoin ensures that any destinations do not resolve to a path above the prefix path.
func SafeJoin(prefix string, dest ...string) (string, error) {
	joinResult := filepath.Join(append([]string{prefix}, dest...)...)
	cleanJoinResult := filepath.Clean(joinResult)
	if !strings.HasPrefix(cleanJoinResult, filepath.Clean(prefix)) {
@ -181,13 +178,18 @@ func safeJoin(prefix string, dest ...string) (string, error) {
	return joinResult, nil
}

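To make the prefix check concrete, a worked example of the now-exported SafeJoin (the paths are hypothetical):

// stays inside the prefix: filepath.Join already cleans the result
p, err := SafeJoin("/tmp/extract", "dir1/../dir2/file.txt")
// p == "/tmp/extract/dir2/file.txt", err == nil

// escapes the prefix: the cleaned join is "/etc/passwd", which does not
// start with "/tmp/extract", so an errZipSlipDetected error is returned
_, err = SafeJoin("/tmp/extract", "../../etc/passwd")
// err != nil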
func extractSingleFile(file *zip.File, expandedFilePath, archivePath string) error {
func extractSingleFile(file archives.FileInfo, expandedFilePath, archivePath string) error {
	zippedFile, err := file.Open()
	if err != nil {
		return fmt.Errorf("unable to read file=%q from zip=%q: %w", file.Name, archivePath, err)
		return fmt.Errorf("unable to read file=%q from zip=%q: %w", file.NameInArchive, archivePath, err)
	}
	defer func() {
		if err := zippedFile.Close(); err != nil {
			log.Errorf("unable to close source file=%q from zip=%q: %+v", file.NameInArchive, archivePath, err)
		}
	}()

	if file.FileInfo().IsDir() {
	if file.IsDir() {
		err = os.MkdirAll(expandedFilePath, file.Mode())
		if err != nil {
			return fmt.Errorf("unable to create dir=%q from zip=%q: %w", expandedFilePath, archivePath, err)
@ -202,20 +204,16 @@ func extractSingleFile(file *zip.File, expandedFilePath, archivePath string) err
		if err != nil {
			return fmt.Errorf("unable to create dest file=%q from zip=%q: %w", expandedFilePath, archivePath, err)
		}
		defer func() {
			if err := outputFile.Close(); err != nil {
				log.Errorf("unable to close dest file=%q from zip=%q: %+v", outputFile.Name(), archivePath, err)
			}
		}()

		if err := safeCopy(outputFile, zippedFile); err != nil {
			return fmt.Errorf("unable to copy source=%q to dest=%q for zip=%q: %w", file.Name, outputFile.Name(), archivePath, err)
		}

		err = outputFile.Close()
		if err != nil {
			return fmt.Errorf("unable to close dest file=%q from zip=%q: %w", outputFile.Name(), archivePath, err)
			return fmt.Errorf("unable to copy source=%q to dest=%q for zip=%q: %w", file.NameInArchive, outputFile.Name(), archivePath, err)
		}
	}

	err = zippedFile.Close()
	if err != nil {
		return fmt.Errorf("unable to close source file=%q from zip=%q: %w", file.Name, archivePath, err)
	}
	return nil
}

@ -4,6 +4,8 @@
package file

import (
	"archive/zip"
	"context"
	"crypto/sha256"
	"encoding/json"
	"errors"
@ -17,6 +19,7 @@ import (

	"github.com/go-test/deep"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func equal(r1, r2 io.Reader) (bool, error) {
@ -55,7 +58,7 @@ func TestUnzipToDir(t *testing.T) {
	expectedPaths := len(expectedZipArchiveEntries)
	observedPaths := 0

	err = UnzipToDir(archiveFilePath, unzipDestinationDir)
	err = UnzipToDir(context.Background(), archiveFilePath, unzipDestinationDir)
	if err != nil {
		t.Fatalf("unable to unzip archive: %+v", err)
	}
@ -145,7 +148,7 @@ func TestContentsFromZip(t *testing.T) {
		paths = append(paths, p)
	}

	actual, err := ContentsFromZip(archivePath, paths...)
	actual, err := ContentsFromZip(context.Background(), archivePath, paths...)
	if err != nil {
		t.Fatalf("unable to extract from unzip archive: %+v", err)
	}
@ -307,9 +310,528 @@ func TestSafeJoin(t *testing.T) {

	for _, test := range tests {
		t.Run(fmt.Sprintf("%+v:%+v", test.prefix, test.args), func(t *testing.T) {
			actual, err := safeJoin(test.prefix, test.args...)
			actual, err := SafeJoin(test.prefix, test.args...)
			test.errAssertion(t, err)
			assert.Equal(t, test.expected, actual)
		})
	}
}

// TestSafeJoin_SymlinkProtection demonstrates that SafeJoin protects against symlink-based
// directory traversal attacks by validating that archive entry paths cannot escape
// the extraction directory.
func TestSafeJoin_SymlinkProtection(t *testing.T) {
	tests := []struct {
		name        string
		archivePath string // Path as it would appear in the archive
		expectError bool
		description string
	}{
		{
			name:        "path traversal via ../",
			archivePath: "../../../outside/file.txt",
			expectError: true,
			description: "Archive entry with ../ trying to escape extraction dir",
		},
		{
			name:        "absolute path symlink target",
			archivePath: "../../../sensitive.txt",
			expectError: true,
			description: "Simulates symlink pointing outside via relative path",
		},
		{
			name:        "safe relative path within extraction dir",
			archivePath: "subdir/safe.txt",
			expectError: false,
			description: "Normal file path that stays within extraction directory",
		},
		{
			name:        "safe path with internal ../",
			archivePath: "dir1/../dir2/file.txt",
			expectError: false,
			description: "Path with ../ that still resolves within extraction dir",
		},
		{
			name:        "deeply nested traversal",
			archivePath: "../../../../../../tmp/evil.txt",
			expectError: true,
			description: "Multiple levels of ../ trying to escape",
		},
		{
			name:        "single parent directory escape",
			archivePath: "../",
			expectError: true,
			description: "Simple one-level escape attempt",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create temp directories to simulate extraction scenario
			tmpDir := t.TempDir()
			extractDir := filepath.Join(tmpDir, "extract")
			outsideDir := filepath.Join(tmpDir, "outside")

			require.NoError(t, os.MkdirAll(extractDir, 0755))
			require.NoError(t, os.MkdirAll(outsideDir, 0755))

			// Create a file outside extraction dir that an attacker might target
			outsideFile := filepath.Join(outsideDir, "sensitive.txt")
			require.NoError(t, os.WriteFile(outsideFile, []byte("sensitive data"), 0644))

			// Test SafeJoin - this is what happens when processing archive entries
			result, err := SafeJoin(extractDir, tt.archivePath)

			if tt.expectError {
				// Should block malicious paths
				require.Error(t, err, "Expected SafeJoin to reject malicious path")
				var zipSlipErr *errZipSlipDetected
				assert.ErrorAs(t, err, &zipSlipErr, "Error should be errZipSlipDetected type")
				assert.Empty(t, result, "Result should be empty for blocked paths")
			} else {
				// Should allow safe paths
				require.NoError(t, err, "Expected SafeJoin to allow safe path")
				assert.NotEmpty(t, result, "Result should not be empty for safe paths")
				assert.True(t, strings.HasPrefix(filepath.Clean(result), filepath.Clean(extractDir)),
					"Safe path should resolve within extraction directory")
			}
		})
	}
}

// TestUnzipToDir_SymlinkAttacks tests UnzipToDir function with malicious ZIP archives
// containing symlink entries that attempt path traversal attacks.
//
// EXPECTED BEHAVIOR: UnzipToDir should either:
// 1. Detect and reject symlinks explicitly with a security error, OR
// 2. Extract them safely (library converts symlinks to regular files)
func TestUnzipToDir_SymlinkAttacks(t *testing.T) {
	tests := []struct {
		name        string
		symlinkName string
		fileName    string
		errContains string
	}{
		{
			name:        "direct symlink to outside directory",
			symlinkName: "evil_link",
			fileName:    "evil_link/payload.txt",
			errContains: "not a directory", // attempt to write through symlink leaf (which is not a directory)
		},
		{
			name:        "directory symlink attack",
			symlinkName: "safe_dir/link",
			fileName:    "safe_dir/link/payload.txt",
			errContains: "not a directory", // attempt to write through symlink (which is not a directory)
		},
		{
			name:        "symlink without payload file",
			symlinkName: "standalone_link",
			fileName:    "", // no payload file
			errContains: "", // no error expected, symlink without payload is safe
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tempDir := t.TempDir()

			// create outside target directory
			outsideDir := filepath.Join(tempDir, "outside_target")
			require.NoError(t, os.MkdirAll(outsideDir, 0755))

			// create extraction directory
			extractDir := filepath.Join(tempDir, "extract")
			require.NoError(t, os.MkdirAll(extractDir, 0755))

			maliciousZip := createMaliciousZipWithSymlink(t, tempDir, tt.symlinkName, outsideDir, tt.fileName)

			err := UnzipToDir(context.Background(), maliciousZip, extractDir)

			// check error expectations
			if tt.errContains != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), tt.errContains)
			} else {
				require.NoError(t, err)
			}

			analyzeExtractionDirectory(t, extractDir)

			// check if payload file escaped extraction directory
			if tt.fileName != "" {
				maliciousFile := filepath.Join(outsideDir, filepath.Base(tt.fileName))
				checkFileOutsideExtraction(t, maliciousFile)
			}

			// check if symlink was created pointing outside
			symlinkPath := filepath.Join(extractDir, tt.symlinkName)
			checkSymlinkCreation(t, symlinkPath, extractDir, outsideDir)
		})
	}
}

// TestContentsFromZip_SymlinkAttacks tests the ContentsFromZip function with malicious
// ZIP archives containing symlink entries.
//
// EXPECTED BEHAVIOR: ContentsFromZip should either:
// 1. Reject symlinks explicitly, OR
// 2. Return empty content for symlinks (library behavior)
//
// Though ContentsFromZip doesn't write to disk, if symlinks were followed it could read sensitive
// files from outside the archive.
func TestContentsFromZip_SymlinkAttacks(t *testing.T) {
	tests := []struct {
		name          string
		symlinkName   string
		symlinkTarget string
		requestPath   string
		errContains   string
	}{
		{
			name:          "request symlink entry directly",
			symlinkName:   "evil_link",
			symlinkTarget: "/etc/hosts", // attempt to read sensitive file
			requestPath:   "evil_link",
			errContains:   "", // no error expected - library returns symlink metadata
		},
		{
			name:          "symlink in nested directory",
			symlinkName:   "nested/link",
			symlinkTarget: "/etc/hosts",
			requestPath:   "nested/link",
			errContains:   "", // no error expected - library returns symlink metadata
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tempDir := t.TempDir()

			// create malicious ZIP with symlink entry (no payload file needed)
			maliciousZip := createMaliciousZipWithSymlink(t, tempDir, tt.symlinkName, tt.symlinkTarget, "")

			contents, err := ContentsFromZip(context.Background(), maliciousZip, tt.requestPath)

			// check error expectations
			if tt.errContains != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), tt.errContains)
				return
			}
			require.NoError(t, err)

			// verify symlink handling - library should return symlink target as content (metadata)
			content, found := contents[tt.requestPath]
			require.True(t, found, "symlink entry should be found in results")

			// verify symlink was NOT followed (content should be target path or empty)
			if content != "" && content != tt.symlinkTarget {
				// content is not empty and not the symlink target - check if actual file was read
				if _, statErr := os.Stat(tt.symlinkTarget); statErr == nil {
					targetContent, readErr := os.ReadFile(tt.symlinkTarget)
					if readErr == nil && string(targetContent) == content {
						t.Errorf("critical issue!... symlink was FOLLOWED and external file content was read!")
						t.Logf("  symlink: %s → %s", tt.requestPath, tt.symlinkTarget)
						t.Logf("  content length: %d bytes", len(content))
					}
				}
			}
		})
	}
}

// TestExtractFromZipToUniqueTempFile_SymlinkAttacks tests the ExtractFromZipToUniqueTempFile
// function with malicious ZIP archives containing symlink entries.
//
// EXPECTED BEHAVIOR: ExtractFromZipToUniqueTempFile should either:
// 1. Reject symlinks explicitly, OR
// 2. Extract them safely (library converts to empty files, filepath.Base sanitizes names)
//
// This function uses filepath.Base() on the archive entry name for temp file prefix and
// os.CreateTemp() which creates files in the specified directory, so it should be protected.
func TestExtractFromZipToUniqueTempFile_SymlinkAttacks(t *testing.T) {
	tests := []struct {
		name          string
		symlinkName   string
		symlinkTarget string
		requestPath   string
		errContains   string
	}{
		{
			name:          "extract symlink entry to temp file",
			symlinkName:   "evil_link",
			symlinkTarget: "/etc/passwd",
			requestPath:   "evil_link",
			errContains:   "", // no error expected - library extracts symlink metadata
		},
		{
			name:          "extract nested symlink",
			symlinkName:   "nested/dir/link",
			symlinkTarget: "/tmp/outside",
			requestPath:   "nested/dir/link",
			errContains:   "", // no error expected
		},
		{
			name:          "extract path traversal symlink name",
			symlinkName:   "../../escape",
			symlinkTarget: "/tmp/outside",
			requestPath:   "../../escape",
			errContains:   "", // no error expected - filepath.Base sanitizes name
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tempDir := t.TempDir()

			maliciousZip := createMaliciousZipWithSymlink(t, tempDir, tt.symlinkName, tt.symlinkTarget, "")

			// create temp directory for extraction
			extractTempDir := filepath.Join(tempDir, "temp_extract")
			require.NoError(t, os.MkdirAll(extractTempDir, 0755))

			openers, err := ExtractFromZipToUniqueTempFile(context.Background(), maliciousZip, extractTempDir, tt.requestPath)

			// check error expectations
			if tt.errContains != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), tt.errContains)
				return
			}
			require.NoError(t, err)

			// verify symlink was extracted
			opener, found := openers[tt.requestPath]
			require.True(t, found, "symlink entry should be extracted")

			// verify temp file is within temp directory
			tempFilePath := opener.path
			cleanTempDir := filepath.Clean(extractTempDir)
			cleanTempFile := filepath.Clean(tempFilePath)
			require.True(t, strings.HasPrefix(cleanTempFile, cleanTempDir),
				"temp file must be within temp directory: %s not in %s", cleanTempFile, cleanTempDir)

			// verify symlink was NOT followed (content should be target path or empty)
			f, openErr := opener.Open()
			require.NoError(t, openErr)
			defer f.Close()

			content, readErr := io.ReadAll(f)
			require.NoError(t, readErr)

			// check if symlink was followed (content matches actual file)
			if len(content) > 0 && string(content) != tt.symlinkTarget {
				if _, statErr := os.Stat(tt.symlinkTarget); statErr == nil {
					targetContent, readErr := os.ReadFile(tt.symlinkTarget)
					if readErr == nil && string(targetContent) == string(content) {
						t.Errorf("critical issue!... symlink was FOLLOWED and external file content was copied!")
						t.Logf("  symlink: %s → %s", tt.requestPath, tt.symlinkTarget)
						t.Logf("  content length: %d bytes", len(content))
					}
				}
			}
		})
	}
}

// forensicFindings contains the results of analyzing an extraction directory
type forensicFindings struct {
	symlinksFound          []forensicSymlink
	regularFiles           []string
	directories            []string
	symlinkVulnerabilities []string
}

type forensicSymlink struct {
	path              string
	target            string
	escapesExtraction bool
	resolvedPath      string
}

// analyzeExtractionDirectory walks the extraction directory and detects symlinks that point
// outside the extraction directory. It is silent unless vulnerabilities are found.
func analyzeExtractionDirectory(t *testing.T, extractDir string) forensicFindings {
	t.Helper()

	findings := forensicFindings{}

	filepath.Walk(extractDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			// only log if there's an error walking the directory
			t.Logf("Error walking %s: %v", path, err)
			return nil
		}

		relPath := strings.TrimPrefix(path, extractDir+"/")
		if relPath == "" {
			relPath = "."
		}

		// use Lstat to detect symlinks without following them
		linfo, lerr := os.Lstat(path)
		if lerr == nil && linfo.Mode()&os.ModeSymlink != 0 {
			target, _ := os.Readlink(path)

			// resolve to see where it actually points
			var resolvedPath string
			var escapesExtraction bool

			if filepath.IsAbs(target) {
				// absolute symlink
				resolvedPath = target
				cleanExtractDir := filepath.Clean(extractDir)
				escapesExtraction = !strings.HasPrefix(filepath.Clean(target), cleanExtractDir)

				if escapesExtraction {
					t.Errorf("critical issue!... absolute symlink created: %s → %s", relPath, target)
					t.Logf("  this symlink points outside the extraction directory")
					findings.symlinkVulnerabilities = append(findings.symlinkVulnerabilities,
						fmt.Sprintf("absolute symlink: %s → %s", relPath, target))
				}
			} else {
				// relative symlink - resolve it
				resolvedPath = filepath.Join(filepath.Dir(path), target)
				cleanResolved := filepath.Clean(resolvedPath)
				cleanExtractDir := filepath.Clean(extractDir)

				escapesExtraction = !strings.HasPrefix(cleanResolved, cleanExtractDir)

				if escapesExtraction {
					t.Errorf("critical issue!... symlink escapes extraction dir: %s → %s", relPath, target)
					t.Logf("  symlink resolves to: %s (outside extraction directory)", cleanResolved)
					findings.symlinkVulnerabilities = append(findings.symlinkVulnerabilities,
						fmt.Sprintf("relative symlink escape: %s → %s (resolves to %s)", relPath, target, cleanResolved))
				}
			}

			findings.symlinksFound = append(findings.symlinksFound, forensicSymlink{
				path:              relPath,
				target:            target,
				escapesExtraction: escapesExtraction,
				resolvedPath:      resolvedPath,
			})
		} else {
			// regular file or directory - collect silently
			if info.IsDir() {
				findings.directories = append(findings.directories, relPath)
			} else {
				findings.regularFiles = append(findings.regularFiles, relPath)
			}
		}
		return nil
	})

	return findings
}

// checkFileOutsideExtraction checks if a file was written outside the extraction directory.
// Returns true if the file exists (vulnerability), false otherwise. Silent on success.
func checkFileOutsideExtraction(t *testing.T, filePath string) bool {
	t.Helper()

	if stat, err := os.Stat(filePath); err == nil {
		content, _ := os.ReadFile(filePath)
		t.Errorf("critical issue!... file written OUTSIDE extraction directory!")
		t.Logf("  location: %s", filePath)
		t.Logf("  size: %d bytes", stat.Size())
		t.Logf("  content: %s", string(content))
		t.Logf("  ...this means an attacker can write files to arbitrary locations on the filesystem")
		return true
	}
	// no file found outside extraction directory...
	return false
}

// checkSymlinkCreation verifies if a symlink was created at the expected path and reports
// whether it points outside the extraction directory. Silent unless a symlink is found.
func checkSymlinkCreation(t *testing.T, symlinkPath, extractDir, expectedTarget string) bool {
	t.Helper()

	if linfo, err := os.Lstat(symlinkPath); err == nil {
		if linfo.Mode()&os.ModeSymlink != 0 {
			target, _ := os.Readlink(symlinkPath)

			if expectedTarget != "" && target == expectedTarget {
				t.Errorf("critical issue!... symlink pointing outside extraction dir was created!")
				t.Logf("  Symlink: %s → %s", symlinkPath, target)
				return true
			}

			// Check if it escapes even if target doesn't match expected
			if filepath.IsAbs(target) {
				cleanExtractDir := filepath.Clean(extractDir)
				if !strings.HasPrefix(filepath.Clean(target), cleanExtractDir) {
					t.Errorf("critical issue!... absolute symlink escapes extraction dir!")
					t.Logf("  symlink: %s → %s", symlinkPath, target)
					return true
				}
			}
		}
		// if it exists but is not a symlink, that's good (attack was thwarted)...
	}

	return false
}

// createMaliciousZipWithSymlink creates a ZIP archive containing a symlink entry pointing to an arbitrary target,
// followed by a file entry that attempts to write through that symlink.
// returns the path to the created ZIP archive.
func createMaliciousZipWithSymlink(t *testing.T, tempDir, symlinkName, symlinkTarget, fileName string) string {
	t.Helper()

	maliciousZip := filepath.Join(tempDir, "malicious.zip")
	zipFile, err := os.Create(maliciousZip)
	require.NoError(t, err)
	defer zipFile.Close()

	zw := zip.NewWriter(zipFile)

	// create parent directories if the symlink is nested
	if dir := filepath.Dir(symlinkName); dir != "." {
		dirHeader := &zip.FileHeader{
			Name:   dir + "/",
			Method: zip.Store,
		}
		dirHeader.SetMode(os.ModeDir | 0755)
		_, err = zw.CreateHeader(dirHeader)
		require.NoError(t, err)
	}

	// create symlink entry pointing outside extraction directory
	// note: ZIP format stores symlinks as regular files with the target path as content
	symlinkHeader := &zip.FileHeader{
		Name:   symlinkName,
		Method: zip.Store,
	}
	symlinkHeader.SetMode(os.ModeSymlink | 0755)

	symlinkWriter, err := zw.CreateHeader(symlinkHeader)
	require.NoError(t, err)

	// write the symlink target as the file content (this is how ZIP stores symlinks)
	_, err = symlinkWriter.Write([]byte(symlinkTarget))
	require.NoError(t, err)

	// create file entry that will be written through the symlink
	if fileName != "" {
		payloadContent := []byte("MALICIOUS PAYLOAD - This should NOT be written outside extraction dir!")
		payloadHeader := &zip.FileHeader{
			Name:   fileName,
			Method: zip.Deflate,
		}
		payloadHeader.SetMode(0644)

		payloadWriter, err := zw.CreateHeader(payloadHeader)
		require.NoError(t, err)

		_, err = payloadWriter.Write(payloadContent)
		require.NoError(t, err)
	}

	require.NoError(t, zw.Close())
	require.NoError(t, zipFile.Close())

	return maliciousZip
}

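As the helper's comments note, ZIP stores a symlink as an entry whose mode has the symlink bit set and whose stored bytes are the target path. A standard-library sketch of spotting such entries before extracting anything (the archive path is hypothetical):

// sketch: flag symlink entries in a zip without writing anything to disk
func listSymlinkEntries(archivePath string) error {
	zr, err := zip.OpenReader(archivePath)
	if err != nil {
		return err
	}
	defer zr.Close()
	for _, f := range zr.File {
		if f.Mode()&os.ModeSymlink != 0 {
			// the entry's stored bytes are the link target, not file data
			fmt.Printf("symlink entry: %s\n", f.Name)
		}
	}
	return nil
}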
@ -1,229 +0,0 @@
package file

import (
	"archive/zip"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math"
	"os"

	"github.com/anchore/syft/internal/log"
)

// directoryEndLen, readBuf, directoryEnd, and findSignatureInBlock were copied from the golang stdlib, specifically:
// - https://github.com/golang/go/blob/go1.16.4/src/archive/zip/struct.go
// - https://github.com/golang/go/blob/go1.16.4/src/archive/zip/reader.go
// findArchiveStartOffset is derived from the same stdlib utils, specifically the readDirectoryEnd function.

const (
	directoryEndLen         = 22
	directory64LocLen       = 20
	directory64EndLen       = 56
	directory64LocSignature = 0x07064b50
	directory64EndSignature = 0x06064b50
)

// ZipReadCloser is a drop-in replacement for zip.ReadCloser (from zip.OpenReader) that additionally considers zips
|
||||
// that have bytes prefixed to the front of the archive (common with self-extracting jars).
|
||||
type ZipReadCloser struct {
|
||||
*zip.Reader
|
||||
io.Closer
|
||||
}
|
||||
|
||||
// OpenZip provides a ZipReadCloser for the given filepath.
|
||||
func OpenZip(filepath string) (*ZipReadCloser, error) {
|
||||
f, err := os.Open(filepath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// some archives may have bytes prepended to the front of the archive, such as with self executing JARs. We first
|
||||
// need to find the start of the archive and keep track of this offset.
|
||||
offset, err := findArchiveStartOffset(f, fi.Size())
|
||||
if err != nil {
|
||||
log.Debugf("cannot find beginning of zip archive=%q : %v", filepath, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err := f.Seek(0, io.SeekStart); err != nil {
|
||||
return nil, fmt.Errorf("unable to seek to beginning of archive: %w", err)
|
||||
}
|
||||
|
||||
if offset > math.MaxInt64 {
|
||||
return nil, fmt.Errorf("archive start offset too large: %v", offset)
|
||||
}
|
||||
offset64 := int64(offset)
|
||||
|
||||
size := fi.Size() - offset64
|
||||
|
||||
r, err := zip.NewReader(io.NewSectionReader(f, offset64, size), size)
|
||||
if err != nil {
|
||||
log.Debugf("unable to open ZipReadCloser @ %q: %v", filepath, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ZipReadCloser{
|
||||
Reader: r,
|
||||
Closer: f,
|
||||
}, nil
|
||||
}

type readBuf []byte

func (b *readBuf) uint16() uint16 {
	v := binary.LittleEndian.Uint16(*b)
	*b = (*b)[2:]
	return v
}

func (b *readBuf) uint32() uint32 {
	v := binary.LittleEndian.Uint32(*b)
	*b = (*b)[4:]
	return v
}

func (b *readBuf) uint64() uint64 {
	v := binary.LittleEndian.Uint64(*b)
	*b = (*b)[8:]
	return v
}

type directoryEnd struct {
	diskNbr            uint32 // unused
	dirDiskNbr         uint32 // unused
	dirRecordsThisDisk uint64 // unused
	directoryRecords   uint64
	directorySize      uint64
	directoryOffset    uint64 // relative to file
}

// note: this is derived from readDirectoryEnd within the archive/zip package
func findArchiveStartOffset(r io.ReaderAt, size int64) (startOfArchive uint64, err error) {
	// look for directoryEndSignature in the last 1k, then in the last 65k
	var buf []byte
	var directoryEndOffset int64
	for i, bLen := range []int64{1024, 65 * 1024} {
		if bLen > size {
			bLen = size
		}
		buf = make([]byte, int(bLen))
		if _, err := r.ReadAt(buf, size-bLen); err != nil && !errors.Is(err, io.EOF) {
			return 0, err
		}
		if p := findSignatureInBlock(buf); p >= 0 {
			buf = buf[p:]
			directoryEndOffset = size - bLen + int64(p)
			break
		}
		if i == 1 || bLen == size {
			return 0, zip.ErrFormat
		}
	}

	if buf == nil {
		// we were unable to find the directoryEndSignature block
		return 0, zip.ErrFormat
	}

	// read header into struct
	b := readBuf(buf[4:]) // skip signature
	d := &directoryEnd{
		diskNbr:            uint32(b.uint16()),
		dirDiskNbr:         uint32(b.uint16()),
		dirRecordsThisDisk: uint64(b.uint16()),
		directoryRecords:   uint64(b.uint16()),
		directorySize:      uint64(b.uint32()),
		directoryOffset:    uint64(b.uint32()),
	}

	// Calculate where the zip data actually begins.

	// These values mean that the file can be a zip64 file.
	if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
		p, err := findDirectory64End(r, directoryEndOffset)
		if err == nil && p >= 0 {
			directoryEndOffset = p
			err = readDirectory64End(r, p, d)
		}
		if err != nil {
			return 0, err
		}
	}
	startOfArchive = uint64(directoryEndOffset) - d.directorySize - d.directoryOffset

	// Make sure directoryOffset points to somewhere in our file.
	if d.directoryOffset >= uint64(size) {
		return 0, zip.ErrFormat
	}
	return startOfArchive, nil
}
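
// A hypothetical worked example of the offset arithmetic above (numbers invented
// for illustration): if the end-of-central-directory record is found at byte 1036,
// and it reports a 500-byte central directory whose recorded offset is 500, then
// startOfArchive = 1036 - 500 - 500 = 36, meaning 36 bytes of non-zip content
// were prepended to the archive.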

// findDirectory64End tries to read the zip64 locator just before the
// directory end and returns the offset of the zip64 directory end if
// found.
func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) {
	locOffset := directoryEndOffset - directory64LocLen
	if locOffset < 0 {
		return -1, nil // no need to look for a header outside the file
	}
	buf := make([]byte, directory64LocLen)
	if _, err := r.ReadAt(buf, locOffset); err != nil {
		return -1, err
	}
	b := readBuf(buf)
	if sig := b.uint32(); sig != directory64LocSignature {
		return -1, nil
	}
	if b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory
		return -1, nil // the file is not a valid zip64-file
	}
	p := b.uint64() // relative offset of the zip64 end of central directory record
	if b.uint32() != 1 { // total number of disks
		return -1, nil // the file is not a valid zip64-file
	}
	return int64(p), nil
}

// readDirectory64End reads the zip64 directory end and updates the
// directory end with the zip64 directory end values.
func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) {
	buf := make([]byte, directory64EndLen)
	if _, err := r.ReadAt(buf, offset); err != nil {
		return err
	}

	b := readBuf(buf)
	if sig := b.uint32(); sig != directory64EndSignature {
		return errors.New("could not read directory64End")
	}

	b = b[12:]                        // skip dir size, version and version needed (uint64 + 2x uint16)
	d.diskNbr = b.uint32()            // number of this disk
	d.dirDiskNbr = b.uint32()         // number of the disk with the start of the central directory
	d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk
	d.directoryRecords = b.uint64()   // total number of entries in the central directory
	d.directorySize = b.uint64()      // size of the central directory
	d.directoryOffset = b.uint64()    // offset of start of central directory with respect to the starting disk number

	return nil
}

func findSignatureInBlock(b []byte) int {
	for i := len(b) - directoryEndLen; i >= 0; i-- {
		// defined from directoryEndSignature
		if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 {
			// n is length of comment
			n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
			if n+directoryEndLen+i <= len(b) {
				return i
			}
		}
	}
	return -1
}
@@ -1,50 +0,0 @@
//go:build !windows
// +build !windows

package file

import (
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFindArchiveStartOffset(t *testing.T) {
	tests := []struct {
		name        string
		archivePrep func(tb testing.TB) string
		expected    uint64
	}{
		{
			name:        "standard, non-nested zip",
			archivePrep: prepZipSourceFixture,
			expected:    0,
		},
		{
			name:        "zip with prepended bytes",
			archivePrep: prependZipSourceFixtureWithString(t, "junk at the beginning of the file..."),
			expected:    36,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			archivePath := test.archivePrep(t)
			f, err := os.Open(archivePath)
			if err != nil {
				t.Fatalf("could not open archive %q: %+v", archivePath, err)
			}
			fi, err := os.Stat(f.Name())
			if err != nil {
				t.Fatalf("unable to stat archive: %+v", err)
			}

			actual, err := findArchiveStartOffset(f, fi.Size())
			if err != nil {
				t.Fatalf("unable to find offset: %+v", err)
			}
			assert.Equal(t, test.expected, actual)
		})
	}
}
internal/jsonschema/README.md (new file, 1 line)
@@ -0,0 +1 @@
Please see [schema/json/README.md](../../schema/json/README.md) for more information on the JSON schema files in this directory.

internal/jsonschema/comments.go (new file, 159 lines)
@@ -0,0 +1,159 @@
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"os"
	"path/filepath"
	"strings"

	"github.com/invopop/jsonschema"
)

func copyAliasFieldComments(commentMap map[string]string, repoRoot string) {
	// find all type aliases by parsing Go source files
	aliases := findTypeAliases(repoRoot)

	// for each alias, copy field comments from the source type
	for aliasName, sourceName := range aliases {
		// find all field comments for the source type
		for key, comment := range commentMap {
			// check if this is a field comment for the source type
			// format: "github.com/anchore/syft/syft/pkg.SourceType.FieldName"
			if strings.Contains(key, "."+sourceName+".") {
				// create the corresponding key for the alias
				aliasKey := strings.Replace(key, "."+sourceName+".", "."+aliasName+".", 1)
				commentMap[aliasKey] = comment
			}
		}
	}
}
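
// For example (mirroring the key format noted above and the alias pair used in the
// tests below): with the alias RpmArchive -> RpmDBEntry, an existing entry such as
//
//	"github.com/anchore/syft/syft/pkg.RpmDBEntry.Name" -> "Name is the RPM package name."
//
// gains the sibling entry
//
//	"github.com/anchore/syft/syft/pkg.RpmArchive.Name" -> "Name is the RPM package name."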

func findTypeAliases(repoRoot string) map[string]string {
	aliases := make(map[string]string)
	fset := token.NewFileSet()

	// walk through all Go files in the repo
	err := filepath.Walk(repoRoot, func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() || !strings.HasSuffix(path, ".go") {
			return nil
		}

		// parse the file
		file, err := parser.ParseFile(fset, path, nil, parser.ParseComments)
		if err != nil {
			return nil
		}

		// look for type alias declarations
		ast.Inspect(file, func(n ast.Node) bool {
			typeSpec, ok := n.(*ast.TypeSpec)
			if !ok {
				return true
			}

			// check if this is a type alias (e.g., type A B where B is an identifier)
			ident, ok := typeSpec.Type.(*ast.Ident)
			if !ok {
				return true
			}

			// store the alias mapping: aliasName -> sourceName
			aliases[typeSpec.Name.Name] = ident.Name
			return true
		})

		return nil
	})

	if err != nil {
		fmt.Fprintf(os.Stderr, "error: failed to find type aliases: %v\n", err)
		panic(err)
	}

	return aliases
}

func hasDescriptionInAlternatives(schema *jsonschema.Schema) bool {
	// check oneOf alternatives
	for _, alt := range schema.OneOf {
		if alt.Description != "" {
			return true
		}
	}
	// check anyOf alternatives
	for _, alt := range schema.AnyOf {
		if alt.Description != "" {
			return true
		}
	}
	return false
}

func warnMissingDescriptions(schema *jsonschema.Schema, metadataNames []string) { //nolint:gocognit
	var missingTypeDescriptions []string
	var missingFieldDescriptions []string

	// check metadata types for missing descriptions
	for _, name := range metadataNames {
		def, ok := schema.Definitions[name]
		if !ok {
			continue
		}

		// check if type has a description
		if def.Description == "" {
			missingTypeDescriptions = append(missingTypeDescriptions, name)
		}

		// check if fields have descriptions
		if def.Properties != nil {
			for _, fieldName := range def.Properties.Keys() {
				fieldSchemaRaw, _ := def.Properties.Get(fieldName)
				fieldSchema, ok := fieldSchemaRaw.(*jsonschema.Schema)
				if !ok {
					continue
				}

				// skip if field has a description
				if fieldSchema.Description != "" {
					continue
				}

				// skip if field is a reference (descriptions come from the referenced type)
				if fieldSchema.Ref != "" {
					continue
				}

				// skip if field is an array/object with items that are references
				if fieldSchema.Items != nil && fieldSchema.Items.Ref != "" {
					continue
				}

				// skip if field uses oneOf/anyOf with descriptions in the alternatives
				if hasDescriptionInAlternatives(fieldSchema) {
					continue
				}

				missingFieldDescriptions = append(missingFieldDescriptions, fmt.Sprintf("%s.%s", name, fieldName))
			}
		}
	}

	// report findings
	if len(missingTypeDescriptions) > 0 {
		fmt.Fprintf(os.Stderr, "\nwarning: %d metadata types are missing descriptions:\n", len(missingTypeDescriptions))
		for _, name := range missingTypeDescriptions {
			fmt.Fprintf(os.Stderr, "  - %s\n", name)
		}
	}

	if len(missingFieldDescriptions) > 0 {
		fmt.Fprintf(os.Stderr, "\nwarning: %d fields are missing descriptions:\n", len(missingFieldDescriptions))
		for _, field := range missingFieldDescriptions {
			fmt.Fprintf(os.Stderr, "  - %s\n", field)
		}
	}
}
internal/jsonschema/comments_test.go (new file, 382 lines)
@@ -0,0 +1,382 @@
package main

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/iancoleman/orderedmap"
	"github.com/invopop/jsonschema"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestCopyAliasFieldComments verifies that field comments from source types are correctly copied to alias types.
// This is important for type aliases like `type RpmArchive RpmDBEntry` where the alias should inherit all field descriptions.
func TestCopyAliasFieldComments(t *testing.T) {
	tests := []struct {
		name         string
		commentMap   map[string]string
		aliases      map[string]string
		wantComments map[string]string
	}{
		{
			name: "copies field comments from source type to alias",
			commentMap: map[string]string{
				"github.com/anchore/syft/syft/pkg.RpmDBEntry":       "RpmDBEntry represents all captured data from a RPM DB package entry.",
				"github.com/anchore/syft/syft/pkg.RpmDBEntry.Name":  "Name is the RPM package name.",
				"github.com/anchore/syft/syft/pkg.RpmDBEntry.Epoch": "Epoch is the version epoch.",
			},
			aliases: map[string]string{
				"RpmArchive": "RpmDBEntry",
			},
			wantComments: map[string]string{
				"github.com/anchore/syft/syft/pkg.RpmDBEntry":       "RpmDBEntry represents all captured data from a RPM DB package entry.",
				"github.com/anchore/syft/syft/pkg.RpmDBEntry.Name":  "Name is the RPM package name.",
				"github.com/anchore/syft/syft/pkg.RpmDBEntry.Epoch": "Epoch is the version epoch.",
				"github.com/anchore/syft/syft/pkg.RpmArchive.Name":  "Name is the RPM package name.",
				"github.com/anchore/syft/syft/pkg.RpmArchive.Epoch": "Epoch is the version epoch.",
			},
		},
		{
			name: "handles multiple aliases",
			commentMap: map[string]string{
				"github.com/anchore/syft/syft/pkg.DpkgDBEntry":              "DpkgDBEntry represents data from dpkg.",
				"github.com/anchore/syft/syft/pkg.DpkgDBEntry.Package":      "Package is the package name.",
				"github.com/anchore/syft/syft/pkg.DpkgDBEntry.Architecture": "Architecture is the target arch.",
			},
			aliases: map[string]string{
				"DpkgArchiveEntry": "DpkgDBEntry",
				"DpkgSnapshot":     "DpkgDBEntry",
			},
			wantComments: map[string]string{
				"github.com/anchore/syft/syft/pkg.DpkgDBEntry":                   "DpkgDBEntry represents data from dpkg.",
				"github.com/anchore/syft/syft/pkg.DpkgDBEntry.Package":           "Package is the package name.",
				"github.com/anchore/syft/syft/pkg.DpkgDBEntry.Architecture":      "Architecture is the target arch.",
				"github.com/anchore/syft/syft/pkg.DpkgArchiveEntry.Package":      "Package is the package name.",
				"github.com/anchore/syft/syft/pkg.DpkgArchiveEntry.Architecture": "Architecture is the target arch.",
				"github.com/anchore/syft/syft/pkg.DpkgSnapshot.Package":          "Package is the package name.",
				"github.com/anchore/syft/syft/pkg.DpkgSnapshot.Architecture":     "Architecture is the target arch.",
			},
		},
		{
			name: "does not copy non-field comments",
			commentMap: map[string]string{
				"github.com/anchore/syft/syft/pkg.SomeType":       "SomeType struct comment.",
				"github.com/anchore/syft/syft/pkg.SomeType.Field": "Field comment.",
			},
			aliases: map[string]string{
				"AliasType": "SomeType",
			},
			wantComments: map[string]string{
				"github.com/anchore/syft/syft/pkg.SomeType":        "SomeType struct comment.",
				"github.com/anchore/syft/syft/pkg.SomeType.Field":  "Field comment.",
				"github.com/anchore/syft/syft/pkg.AliasType.Field": "Field comment.",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// create temp dir for testing
			tmpDir := t.TempDir()

			// create a test go file with type aliases
			testFile := filepath.Join(tmpDir, "test.go")
			content := "package test\n\n"
			for alias, source := range tt.aliases {
				content += "type " + alias + " " + source + "\n"
			}
			err := os.WriteFile(testFile, []byte(content), 0644)
			require.NoError(t, err)

			// make a copy of the comment map since the function modifies it
			commentMap := make(map[string]string)
			for k, v := range tt.commentMap {
				commentMap[k] = v
			}

			// run the function
			copyAliasFieldComments(commentMap, tmpDir)

			// verify results
			assert.Equal(t, tt.wantComments, commentMap)
		})
	}
}

func TestFindTypeAliases(t *testing.T) {
	tests := []struct {
		name        string
		fileContent string
		wantAliases map[string]string
	}{
		{
			name: "finds simple type alias",
			fileContent: `package test

type RpmArchive RpmDBEntry
type DpkgArchiveEntry DpkgDBEntry
`,
			wantAliases: map[string]string{
				"RpmArchive":       "RpmDBEntry",
				"DpkgArchiveEntry": "DpkgDBEntry",
			},
		},
		{
			name: "ignores struct definitions",
			fileContent: `package test

type MyStruct struct {
	Field string
}

type AliasType BaseType
`,
			wantAliases: map[string]string{
				"AliasType": "BaseType",
			},
		},
		{
			name: "ignores interface definitions",
			fileContent: `package test

type MyInterface interface {
	Method()
}

type AliasType BaseType
`,
			wantAliases: map[string]string{
				"AliasType": "BaseType",
			},
		},
		{
			name: "handles multiple files",
			fileContent: `package test

type Alias1 Base1
type Alias2 Base2
`,
			wantAliases: map[string]string{
				"Alias1": "Base1",
				"Alias2": "Base2",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// create temp dir
			tmpDir := t.TempDir()

			// write test file
			testFile := filepath.Join(tmpDir, "test.go")
			err := os.WriteFile(testFile, []byte(tt.fileContent), 0644)
			require.NoError(t, err)

			// run function
			aliases := findTypeAliases(tmpDir)

			// verify
			assert.Equal(t, tt.wantAliases, aliases)
		})
	}
}

func TestHasDescriptionInAlternatives(t *testing.T) {
	tests := []struct {
		name   string
		schema *jsonschema.Schema
		want   bool
	}{
		{
			name: "returns true when oneOf has description",
			schema: &jsonschema.Schema{
				OneOf: []*jsonschema.Schema{
					{Description: "First alternative"},
					{Type: "null"},
				},
			},
			want: true,
		},
		{
			name: "returns true when anyOf has description",
			schema: &jsonschema.Schema{
				AnyOf: []*jsonschema.Schema{
					{Description: "First alternative"},
					{Type: "null"},
				},
			},
			want: true,
		},
		{
			name: "returns false when no alternatives have descriptions",
			schema: &jsonschema.Schema{
				OneOf: []*jsonschema.Schema{
					{Type: "integer"},
					{Type: "null"},
				},
			},
			want: false,
		},
		{
			name: "returns false when no oneOf or anyOf",
			schema: &jsonschema.Schema{
				Type: "string",
			},
			want: false,
		},
		{
			name: "returns true when any alternative in oneOf has description",
			schema: &jsonschema.Schema{
				OneOf: []*jsonschema.Schema{
					{Type: "integer"},
					{Type: "string", Description: "Second alternative"},
					{Type: "null"},
				},
			},
			want: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := hasDescriptionInAlternatives(tt.schema)
			assert.Equal(t, tt.want, got)
		})
	}
}

func TestWarnMissingDescriptions(t *testing.T) {
	tests := []struct {
		name              string
		schema            *jsonschema.Schema
		metadataNames     []string
		wantTypeWarnings  int
		wantFieldWarnings int
	}{
		{
			name: "no warnings when all types have descriptions",
			schema: &jsonschema.Schema{
				Definitions: map[string]*jsonschema.Schema{
					"TypeA": {
						Description: "Type A description",
						Properties: newOrderedMap(map[string]*jsonschema.Schema{
							"field1": {Type: "string", Description: "Field 1"},
						}),
					},
				},
			},
			metadataNames:     []string{"TypeA"},
			wantTypeWarnings:  0,
			wantFieldWarnings: 0,
		},
		{
			name: "warns about missing type description",
			schema: &jsonschema.Schema{
				Definitions: map[string]*jsonschema.Schema{
					"TypeA": {
						Properties: newOrderedMap(map[string]*jsonschema.Schema{
							"field1": {Type: "string", Description: "Field 1"},
						}),
					},
				},
			},
			metadataNames:     []string{"TypeA"},
			wantTypeWarnings:  1,
			wantFieldWarnings: 0,
		},
		{
			name: "warns about missing field description",
			schema: &jsonschema.Schema{
				Definitions: map[string]*jsonschema.Schema{
					"TypeA": {
						Description: "Type A description",
						Properties: newOrderedMap(map[string]*jsonschema.Schema{
							"field1": {Type: "string"},
						}),
					},
				},
			},
			metadataNames:     []string{"TypeA"},
			wantTypeWarnings:  0,
			wantFieldWarnings: 1,
		},
		{
			name: "skips fields with references",
			schema: &jsonschema.Schema{
				Definitions: map[string]*jsonschema.Schema{
					"TypeA": {
						Description: "Type A description",
						Properties: newOrderedMap(map[string]*jsonschema.Schema{
							"field1": {Ref: "#/$defs/OtherType"},
						}),
					},
				},
			},
			metadataNames:     []string{"TypeA"},
			wantTypeWarnings:  0,
			wantFieldWarnings: 0,
		},
		{
			name: "skips fields with items that are references",
			schema: &jsonschema.Schema{
				Definitions: map[string]*jsonschema.Schema{
					"TypeA": {
						Description: "Type A description",
						Properties: newOrderedMap(map[string]*jsonschema.Schema{
							"field1": {
								Type:  "array",
								Items: &jsonschema.Schema{Ref: "#/$defs/OtherType"},
							},
						}),
					},
				},
			},
			metadataNames:     []string{"TypeA"},
			wantTypeWarnings:  0,
			wantFieldWarnings: 0,
		},
		{
			name: "skips fields with oneOf containing descriptions",
			schema: &jsonschema.Schema{
				Definitions: map[string]*jsonschema.Schema{
					"TypeA": {
						Description: "Type A description",
						Properties: newOrderedMap(map[string]*jsonschema.Schema{
							"field1": {
								OneOf: []*jsonschema.Schema{
									{Type: "integer", Description: "Integer value"},
									{Type: "null"},
								},
							},
						}),
					},
				},
			},
			metadataNames:     []string{"TypeA"},
			wantTypeWarnings:  0,
			wantFieldWarnings: 0,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// capturing stderr output would require more complex testing;
			// for now, just verify the function runs without panicking
			require.NotPanics(t, func() {
				warnMissingDescriptions(tt.schema, tt.metadataNames)
			})
		})
	}
}

// helper to create an ordered map from a regular map
func newOrderedMap(m map[string]*jsonschema.Schema) *orderedmap.OrderedMap {
	om := orderedmap.New()
	for k, v := range m {
		om.Set(k, v)
	}
	return om
}
@@ -15,8 +15,8 @@ import (
	"github.com/invopop/jsonschema"

	"github.com/anchore/syft/internal"
	"github.com/anchore/syft/internal/packagemetadata"
	syftJsonModel "github.com/anchore/syft/syft/format/syftjson/model"
	"github.com/anchore/syft/syft/internal/packagemetadata"
)

@@ -26,6 +26,17 @@ are not captured (empty interfaces). This means that pkg.Package.Metadata is not
can be extended to include specific package metadata struct shapes in the future.
*/

var repoRoot string

func init() {
	var err error
	repoRoot, err = packagemetadata.RepoRoot()
	if err != nil {
		fmt.Println("unable to determine repo root")
		os.Exit(1)
	}
}

func main() {
	write(encode(build()))
}

@@ -60,7 +71,7 @@ func assembleTypeContainer(items []any) (any, map[string]string) {
	}

	if len(typesMissingNames) > 0 {
		fmt.Println("the following types are missing JSON names (manually curated in ./syft/internal/packagemetadata/names.go):")
		fmt.Println("the following types are missing JSON names (manually curated in ./internal/packagemetadata/names.go):")
		for _, t := range typesMissingNames {
			fmt.Println(" - ", t.Name())
		}

@@ -78,6 +89,38 @@ func build() *jsonschema.Schema {
		Namer: func(r reflect.Type) string {
			return strings.TrimPrefix(r.Name(), "JSON")
		},
		CommentMap: make(map[string]string),
	}

	// extract comments from Go source files to enrich schema descriptions
	//
	// note: AddGoComments parses from the module root and creates keys like "syft/pkg.TypeName",
	// but the reflector expects fully qualified paths like "github.com/anchore/syft/syft/pkg.TypeName".
	// We fix up the keys after extraction to match the expected format.
	if err := reflector.AddGoComments("github.com/anchore/syft", repoRoot); err != nil {
		fmt.Fprintf(os.Stderr, "warning: failed to extract Go comments: %v\n", err)
	} else {
		// fix up comment map keys to use fully qualified import paths
		// note: AddGoComments includes the absolute repo path WITHOUT the leading slash
		repoRootNoSlash := strings.TrimPrefix(repoRoot, "/")
		fixedMap := make(map[string]string)
		for k, v := range reflector.CommentMap {
			newKey := k
			if !strings.HasPrefix(k, "github.com/") {
				// key doesn't have module prefix, add it
				newKey = "github.com/anchore/syft/" + k
			} else if strings.Contains(k, repoRootNoSlash) {
				// key has the absolute repo path embedded, strip it
				// format: github.com/anchore/syft/Users/wagoodman/code/syft-manual/syft/pkg.Type
				// should be: github.com/anchore/syft/syft/pkg.Type
				newKey = strings.Replace(k, repoRootNoSlash+"/", "", 1)
			}
			fixedMap[newKey] = v
		}
		reflector.CommentMap = fixedMap

		// copy field comments for type aliases (e.g., type RpmArchive RpmDBEntry)
		copyAliasFieldComments(reflector.CommentMap, repoRoot)
	}

	pkgMetadataContainer, pkgMetadataMapping := assembleTypeContainer(packagemetadata.AllTypes())

@@ -130,6 +173,9 @@ func build() *jsonschema.Schema {
		"anyOf": metadataTypes,
	})

	// warn about missing descriptions
	warnMissingDescriptions(documentSchema, metadataNames)

	return documentSchema
}

@@ -148,11 +194,6 @@ func encode(schema *jsonschema.Schema) []byte {
}

func write(schema []byte) {
	repoRoot, err := packagemetadata.RepoRoot()
	if err != nil {
		fmt.Println("unable to determine repo root")
		os.Exit(1)
	}
	schemaPath := filepath.Join(repoRoot, "schema", "json", fmt.Sprintf("schema-%s.json", internal.JSONSchemaVersion))
	latestSchemaPath := filepath.Join(repoRoot, "schema", "json", "schema-latest.json")

@@ -81,6 +81,10 @@ func Test_EnvironmentTask(t *testing.T) {
	// get the source
	theSource, err := syft.GetSource(context.Background(), tarPath, syft.DefaultGetSourceConfig().WithSources("docker-archive"))
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, theSource.Close())
	})

	resolver, err := theSource.FileResolver(source.SquashedScope)
	require.NoError(t, err)

@@ -24,7 +24,7 @@ var knownNonMetadataTypeNames = strset.New(

// these are names that would be removed due to common convention (e.g. used within another metadata type) but are
// known to be metadata types themselves. Adding to this list will prevent the removal of the type from the schema.
var knownMetadaTypeNames = strset.New(
var knownMetadataTypeNames = strset.New(
	"DotnetPortableExecutableEntry",
)

@@ -72,7 +72,7 @@ func findMetadataDefinitionNames(paths ...string) ([]string, error) {
	}

	// any definition that is used within another struct should not be considered a top-level metadata definition
	removeNames := strset.Difference(usedNames, knownMetadaTypeNames)
	removeNames := strset.Difference(usedNames, knownMetadataTypeNames)
	names.Remove(removeNames.List()...)

	// remove known exceptions, that is, types exported in the pkg Package that are not used

@@ -7,7 +7,7 @@ import (

	"github.com/dave/jennifer/jen"

	"github.com/anchore/syft/syft/internal/packagemetadata"
	"github.com/anchore/syft/internal/packagemetadata"
)

// This program is invoked from syft/internal and generates packagemetadata/generated.go

@@ -31,7 +31,7 @@ func main() {
	fmt.Printf("updating package metadata type list with %+v types\n", len(typeNames))

	f := jen.NewFile("packagemetadata")
	f.HeaderComment("DO NOT EDIT: generated by syft/internal/packagemetadata/generate/main.go")
	f.HeaderComment("DO NOT EDIT: generated by internal/packagemetadata/generate/main.go")
	f.ImportName(pkgImport, "pkg")
	f.Comment("AllTypes returns a list of all pkg metadata types that syft supports (that are represented in the pkg.Package.Metadata field).")

@@ -1,4 +1,4 @@
// DO NOT EDIT: generated by syft/internal/packagemetadata/generate/main.go
// DO NOT EDIT: generated by internal/packagemetadata/generate/main.go

package packagemetadata

@@ -16,6 +16,7 @@ func AllTypes() []any {
		pkg.ConanV2LockEntry{},
		pkg.ConanfileEntry{},
		pkg.ConaninfoEntry{},
		pkg.CondaMetaPackage{},
		pkg.DartPubspec{},
		pkg.DartPubspecLockEntry{},
		pkg.DotnetDepsEntry{},
@@ -26,9 +27,11 @@ func AllTypes() []any {
		pkg.ELFBinaryPackageNoteJSONPayload{},
		pkg.ElixirMixLockEntry{},
		pkg.ErlangRebarLockEntry{},
		pkg.GGUFFileHeader{},
		pkg.GitHubActionsUseStatement{},
		pkg.GolangBinaryBuildinfoEntry{},
		pkg.GolangModuleEntry{},
		pkg.GolangSourceEntry{},
		pkg.HackageStackYamlEntry{},
		pkg.HackageStackYamlLockEntry{},
		pkg.HomebrewFormula{},
@@ -47,8 +50,10 @@ func AllTypes() []any {
		pkg.PhpComposerLockEntry{},
		pkg.PhpPearEntry{},
		pkg.PhpPeclEntry{},
		pkg.PnpmLockEntry{},
		pkg.PortageEntry{},
		pkg.PythonPackage{},
		pkg.PythonPdmLockEntry{},
		pkg.PythonPipfileLockEntry{},
		pkg.PythonPoetryLockEntry{},
		pkg.PythonRequirementsEntry{},
@@ -59,6 +64,7 @@ func AllTypes() []any {
		pkg.RubyGemspec{},
		pkg.RustBinaryAuditEntry{},
		pkg.RustCargoLockEntry{},
		pkg.SnapEntry{},
		pkg.SwiftPackageManagerResolvedEntry{},
		pkg.SwiplPackEntry{},
		pkg.TerraformLockProviderEntry{},

@@ -82,6 +82,7 @@ var jsonTypes = makeJSONTypes(
	jsonNames(pkg.GitHubActionsUseStatement{}, "github-actions-use-statement"),
	jsonNames(pkg.GolangBinaryBuildinfoEntry{}, "go-module-buildinfo-entry", "GolangBinMetadata", "GolangMetadata"),
	jsonNames(pkg.GolangModuleEntry{}, "go-module-entry", "GolangModMetadata"),
	jsonNames(pkg.GolangSourceEntry{}, "go-source-entry"),
	jsonNames(pkg.HackageStackYamlLockEntry{}, "haskell-hackage-stack-lock-entry", "HackageMetadataType"),
	jsonNamesWithoutLookup(pkg.HackageStackYamlEntry{}, "haskell-hackage-stack-entry", "HackageMetadataType"), // the legacy value is split into two types, where the other is preferred
	jsonNames(pkg.JavaArchive{}, "java-archive", "JavaMetadata"),
@@ -94,13 +95,15 @@ var jsonTypes = makeJSONTypes(
	jsonNames(pkg.NpmPackage{}, "javascript-npm-package", "NpmPackageJsonMetadata"),
	jsonNames(pkg.NpmPackageLockEntry{}, "javascript-npm-package-lock-entry", "NpmPackageLockJsonMetadata"),
	jsonNames(pkg.YarnLockEntry{}, "javascript-yarn-lock-entry", "YarnLockJsonMetadata"),
	jsonNames(pkg.PnpmLockEntry{}, "javascript-pnpm-lock-entry"),
	jsonNames(pkg.PEBinary{}, "pe-binary"),
	jsonNames(pkg.PhpComposerLockEntry{}, "php-composer-lock-entry", "PhpComposerJsonMetadata"),
	jsonNamesWithoutLookup(pkg.PhpComposerInstalledEntry{}, "php-composer-installed-entry", "PhpComposerJsonMetadata"), // the legacy value is split into two types, where the other is preferred
	jsonNames(pkg.PhpPeclEntry{}, "php-pecl-entry", "PhpPeclMetadata"),
	jsonNames(pkg.PhpPeclEntry{}, "php-pecl-entry", "PhpPeclMetadata"), //nolint:staticcheck
	jsonNames(pkg.PhpPearEntry{}, "php-pear-entry"),
	jsonNames(pkg.PortageEntry{}, "portage-db-entry", "PortageMetadata"),
	jsonNames(pkg.PythonPackage{}, "python-package", "PythonPackageMetadata"),
	jsonNames(pkg.PythonPdmLockEntry{}, "python-pdm-lock-entry"),
	jsonNames(pkg.PythonPipfileLockEntry{}, "python-pipfile-lock-entry", "PythonPipfileLockMetadata"),
	jsonNames(pkg.PythonPoetryLockEntry{}, "python-poetry-lock-entry", "PythonPoetryLockMetadata"),
	jsonNames(pkg.PythonRequirementsEntry{}, "python-pip-requirements-entry", "PythonRequirementsMetadata"),
@@ -114,11 +117,14 @@ var jsonTypes = makeJSONTypes(
	jsonNames(pkg.OpamPackage{}, "opam-package"),
	jsonNames(pkg.RustCargoLockEntry{}, "rust-cargo-lock-entry", "RustCargoPackageMetadata"),
	jsonNamesWithoutLookup(pkg.RustBinaryAuditEntry{}, "rust-cargo-audit-entry", "RustCargoPackageMetadata"), // the legacy value is split into two types, where the other is preferred
	jsonNames(pkg.SnapEntry{}, "snap-entry"),
	jsonNames(pkg.WordpressPluginEntry{}, "wordpress-plugin-entry", "WordpressMetadata"),
	jsonNames(pkg.HomebrewFormula{}, "homebrew-formula"),
	jsonNames(pkg.LuaRocksPackage{}, "luarocks-package"),
	jsonNames(pkg.TerraformLockProviderEntry{}, "terraform-lock-provider-entry"),
	jsonNames(pkg.DotnetPackagesLockEntry{}, "dotnet-packages-lock-entry"),
	jsonNames(pkg.CondaMetaPackage{}, "conda-metadata-entry", "CondaPackageMetadata"),
	jsonNames(pkg.GGUFFileHeader{}, "gguf-file-header"),
)

func expandLegacyNameVariants(names ...string) []string {

@@ -347,6 +347,12 @@ func Test_JSONName_JSONLegacyName(t *testing.T) {
			expectedJSONName:   "go-module-entry",
			expectedLegacyName: "GolangModMetadata",
		},
		{
			name:               "GolangSourceMetadata",
			metadata:           pkg.GolangSourceEntry{},
			expectedJSONName:   "go-source-entry",
			expectedLegacyName: "go-source-entry",
		},
		{
			name:     "HackageStackYamlLockMetadata",
			metadata: pkg.HackageStackYamlLockEntry{},

@@ -6,7 +6,7 @@ import (

	"github.com/dave/jennifer/jen"

	"github.com/anchore/syft/syft/internal/sourcemetadata"
	"github.com/anchore/syft/internal/sourcemetadata"
)

// This program is invoked from syft/internal and generates sourcemetadata/generated.go

@@ -25,7 +25,7 @@ func main() {
	fmt.Printf("updating source metadata type list with %+v types\n", len(typeNames))

	f := jen.NewFile("sourcemetadata")
	f.HeaderComment("DO NOT EDIT: generated by syft/internal/sourcemetadata/generate/main.go")
	f.HeaderComment("DO NOT EDIT: generated by internal/sourcemetadata/generate/main.go")
	f.ImportName(srcImport, "source")
	f.Comment("AllTypes returns a list of all source metadata types that syft supports (that are represented in the source.Description.Metadata field).")

@@ -1,4 +1,4 @@
// DO NOT EDIT: generated by syft/internal/sourcemetadata/generate/main.go
// DO NOT EDIT: generated by internal/sourcemetadata/generate/main.go

package sourcemetadata

@@ -10,7 +10,6 @@ import (
	"sort"
	"strings"
	"text/template"
	"time"
)

// This program generates license_list.go.

@@ -20,8 +19,7 @@ const (
)

var tmp = template.Must(template.New("").Parse(`// Code generated by go generate; DO NOT EDIT.
// This file was generated by robots at {{ .Timestamp }}
// using data from {{ .URL }}
// This file was generated using data from {{ .URL }}
package spdxlicense

const Version = {{ printf "%q" .Version }}

@@ -31,6 +29,13 @@ var licenseIDs = map[string]string{
	{{ printf "%q" $k }}: {{ printf "%q" $v }},
{{- end }}
}

// urlToLicense maps license URLs from the seeAlso field to license IDs
var urlToLicense = map[string]string{
{{- range $url, $id := .URLToLicense }}
	{{ printf "%q" $url }}: {{ printf "%q" $id }},
{{- end }}
}
`))

var versionMatch = regexp.MustCompile(`([0-9]+)\.?([0-9]+)?\.?([0-9]+)?\.?`)

@@ -68,17 +73,18 @@ func run() error {
	}()

	licenseIDs := processSPDXLicense(result)
	urlToLicense := buildURLToLicenseMap(result)

	err = tmp.Execute(f, struct {
		Timestamp    time.Time
		URL          string
		Version      string
		LicenseIDs   map[string]string
		URLToLicense map[string]string
	}{
		Timestamp:    time.Now(),
		URL:          url,
		Version:      result.Version,
		LicenseIDs:   licenseIDs,
		URLToLicense: urlToLicense,
	})

	if err != nil {

@@ -156,3 +162,30 @@ func cleanLicenseID(id string) string {
	cleanID := strings.ToLower(id)
	return strings.ReplaceAll(cleanID, "-", "")
}

// buildURLToLicenseMap creates a mapping from license URLs (from seeAlso fields) to license IDs
func buildURLToLicenseMap(result LicenseList) map[string]string {
	urlMap := make(map[string]string)

	for _, l := range result.Licenses {
		// Skip deprecated licenses
		if l.Deprecated {
			// Find replacement license if available
			replacement := result.findReplacementLicense(l)
			if replacement != nil {
				// Map deprecated license URLs to the replacement license
				for _, url := range l.SeeAlso {
					urlMap[url] = replacement.ID
				}
			}
			continue
		}

		// Add URLs from non-deprecated licenses
		for _, url := range l.SeeAlso {
			urlMap[url] = l.ID
		}
	}

	return urlMap
}

@@ -35,3 +35,20 @@ func cleanLicenseID(id string) string {
	id = strings.ToLower(id)
	return strings.ReplaceAll(id, "-", "")
}

// LicenseInfo contains license ID and name
type LicenseInfo struct {
	ID string
}

// LicenseByURL returns the license ID and name for a given URL from the SPDX license list
// The URL should match one of the URLs in the seeAlso field of an SPDX license
func LicenseByURL(url string) (LicenseInfo, bool) {
	url = strings.TrimSpace(url)
	if id, exists := urlToLicense[url]; exists {
		return LicenseInfo{
			ID: id,
		}, true
	}
	return LicenseInfo{}, false
}
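
// A hedged usage sketch (hypothetical caller; the URL below is one of the seeAlso
// values exercised by the test file that follows):
//
//	if info, ok := LicenseByURL("https://www.apache.org/licenses/LICENSE-2.0"); ok {
//		fmt.Println(info.ID) // expected to print "Apache-2.0"
//	}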

(File diff suppressed because it is too large)

internal/spdxlicense/license_url_test.go (new file, 97 lines)
@@ -0,0 +1,97 @@
package spdxlicense

import (
	"testing"
)

func TestLicenseByURL(t *testing.T) {
	tests := []struct {
		name      string
		url       string
		wantID    string
		wantFound bool
	}{
		{
			name:      "MIT license URL (https)",
			url:       "https://opensource.org/license/mit/",
			wantID:    "MIT",
			wantFound: true,
		},
		{
			name:      "MIT license URL (http)",
			url:       "http://opensource.org/licenses/MIT",
			wantID:    "MIT",
			wantFound: true,
		},
		{
			name:      "Apache 2.0 license URL",
			url:       "https://www.apache.org/licenses/LICENSE-2.0",
			wantID:    "Apache-2.0",
			wantFound: true,
		},
		{
			name:      "GPL 3.0 or later URL",
			url:       "https://www.gnu.org/licenses/gpl-3.0-standalone.html",
			wantID:    "GPL-3.0-or-later",
			wantFound: true,
		},
		{
			name:      "BSD 3-Clause URL",
			url:       "https://opensource.org/licenses/BSD-3-Clause",
			wantID:    "BSD-3-Clause",
			wantFound: true,
		},
		{
			name:      "URL with trailing whitespace",
			url:       " http://opensource.org/licenses/MIT ",
			wantID:    "MIT",
			wantFound: true,
		},
		{
			name:      "Unknown URL",
			url:       "https://example.com/unknown-license",
			wantID:    "",
			wantFound: false,
		},
		{
			name:      "Empty URL",
			url:       "",
			wantID:    "",
			wantFound: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			info, found := LicenseByURL(tt.url)
			if found != tt.wantFound {
				t.Errorf("LicenseByURL() found = %v, want %v", found, tt.wantFound)
			}
			if found {
				if info.ID != tt.wantID {
					t.Errorf("LicenseByURL() ID = %v, want %v", info.ID, tt.wantID)
				}
			}
		})
	}
}

func TestLicenseByURL_DeprecatedLicenses(t *testing.T) {
	// Test that deprecated license URLs map to their replacement licenses
	// For example, GPL-2.0+ should map to GPL-2.0-or-later

	// This test needs actual URLs from deprecated licenses
	// We can verify by checking if a deprecated license URL maps to a non-deprecated ID
	url := "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html"
	info, found := LicenseByURL(url)

	if found {
		// Check that we got a valid non-deprecated license ID
		if info.ID == "" {
			t.Error("Got empty license ID for deprecated license URL")
		}
		// The ID should be the replacement (GPL-2.0-only or GPL-2.0-or-later)
		// depending on the URL
		t.Logf("Deprecated license URL mapped to: ID=%s", info.ID)
	}
}

@@ -3,10 +3,12 @@ package task
import (
	"github.com/anchore/syft/syft/cataloging/pkgcataloging"
	"github.com/anchore/syft/syft/pkg"
	"github.com/anchore/syft/syft/pkg/cataloger/ai"
	"github.com/anchore/syft/syft/pkg/cataloger/alpine"
	"github.com/anchore/syft/syft/pkg/cataloger/arch"
	"github.com/anchore/syft/syft/pkg/cataloger/binary"
	bitnamiSbomCataloger "github.com/anchore/syft/syft/pkg/cataloger/bitnami"
	"github.com/anchore/syft/syft/pkg/cataloger/conda"
	"github.com/anchore/syft/syft/pkg/cataloger/cpp"
	"github.com/anchore/syft/syft/pkg/cataloger/dart"
	"github.com/anchore/syft/syft/pkg/cataloger/debian"
@@ -31,6 +33,7 @@ import (
	"github.com/anchore/syft/syft/pkg/cataloger/ruby"
	"github.com/anchore/syft/syft/pkg/cataloger/rust"
	sbomCataloger "github.com/anchore/syft/syft/pkg/cataloger/sbom"
	"github.com/anchore/syft/syft/pkg/cataloger/snap"
	"github.com/anchore/syft/syft/pkg/cataloger/swift"
	"github.com/anchore/syft/syft/pkg/cataloger/swipl"
	"github.com/anchore/syft/syft/pkg/cataloger/terraform"
@@ -50,6 +53,9 @@ const (
	JavaScript = "javascript"
	Node       = "node"
	NPM        = "npm"

	// Python ecosystem labels
	Python = "python"
)

//nolint:funlen
@@ -107,7 +113,7 @@ func DefaultPackageTaskFactories() Factories {
		func(cfg CatalogingFactoryConfig) pkg.Cataloger {
			return python.NewPackageCataloger(cfg.PackagesConfig.Python)
		},
		pkgcataloging.DeclaredTag, pkgcataloging.DirectoryTag, pkgcataloging.LanguageTag, "python",
		pkgcataloging.DeclaredTag, pkgcataloging.DirectoryTag, pkgcataloging.LanguageTag, Python,
	),
	newSimplePackageTaskFactory(ruby.NewGemFileLockCataloger, pkgcataloging.DeclaredTag, pkgcataloging.DirectoryTag, pkgcataloging.LanguageTag, "ruby", "gem"),
	newSimplePackageTaskFactory(ruby.NewGemSpecCataloger, pkgcataloging.DeclaredTag, pkgcataloging.DirectoryTag, pkgcataloging.LanguageTag, "ruby", "gem", "gemspec"),
@@ -125,7 +131,7 @@ func DefaultPackageTaskFactories() Factories {
		pkgcataloging.InstalledTag, pkgcataloging.ImageTag, pkgcataloging.DirectoryTag, pkgcataloging.LanguageTag, "dotnet", "c#",
	),
	newSimplePackageTaskFactory(dotnet.NewDotnetPackagesLockCataloger, pkgcataloging.DeclaredTag, pkgcataloging.ImageTag, pkgcataloging.DirectoryTag, pkgcataloging.LanguageTag, "dotnet", "c#"),
	newSimplePackageTaskFactory(python.NewInstalledPackageCataloger, pkgcataloging.DirectoryTag, pkgcataloging.InstalledTag, pkgcataloging.ImageTag, pkgcataloging.LanguageTag, "python"),
	newSimplePackageTaskFactory(python.NewInstalledPackageCataloger, pkgcataloging.DirectoryTag, pkgcataloging.InstalledTag, pkgcataloging.ImageTag, pkgcataloging.LanguageTag, Python),
	newPackageTaskFactory(
		func(cfg CatalogingFactoryConfig) pkg.Cataloger {
			return golang.NewGoModuleBinaryCataloger(cfg.PackagesConfig.Golang)
@@ -171,12 +177,15 @@ func DefaultPackageTaskFactories() Factories {
	newSimplePackageTaskFactory(wordpress.NewWordpressPluginCataloger, pkgcataloging.DirectoryTag, pkgcataloging.ImageTag, "wordpress"),
	newSimplePackageTaskFactory(terraform.NewLockCataloger, pkgcataloging.DeclaredTag, pkgcataloging.DirectoryTag, "terraform"),
	newSimplePackageTaskFactory(homebrew.NewCataloger, pkgcataloging.DirectoryTag, pkgcataloging.InstalledTag, pkgcataloging.ImageTag, "homebrew"),
	newSimplePackageTaskFactory(conda.NewCondaMetaCataloger, pkgcataloging.DirectoryTag, pkgcataloging.InstalledTag, pkgcataloging.PackageTag, "conda"),
	newSimplePackageTaskFactory(snap.NewCataloger, pkgcataloging.DirectoryTag, pkgcataloging.InstalledTag, pkgcataloging.ImageTag, "snap"),
	newSimplePackageTaskFactory(ai.NewGGUFCataloger, pkgcataloging.DirectoryTag, pkgcataloging.ImageTag, "ai", "model", "gguf", "ml"),

	// deprecated catalogers ////////////////////////////////////////
	// these are catalogers that should not be selectable other than specific inclusion via name or "deprecated" tag (to remain backwards compatible)
	newSimplePackageTaskFactory(dotnet.NewDotnetDepsCataloger, pkgcataloging.DeprecatedTag),               // TODO: remove in syft v2.0
	newSimplePackageTaskFactory(dotnet.NewDotnetPortableExecutableCataloger, pkgcataloging.DeprecatedTag), // TODO: remove in syft v2.0
	newSimplePackageTaskFactory(php.NewPeclCataloger, pkgcataloging.DeprecatedTag),                        // TODO: remove in syft v2.0
	newSimplePackageTaskFactory(nix.NewStoreCataloger, pkgcataloging.DeprecatedTag),                       // TODO: remove in syft v2.0
	newSimplePackageTaskFactory(dotnet.NewDotnetDepsCataloger, pkgcataloging.DeprecatedTag),               //nolint:staticcheck // TODO: remove in syft v2.0
	newSimplePackageTaskFactory(dotnet.NewDotnetPortableExecutableCataloger, pkgcataloging.DeprecatedTag), //nolint:staticcheck // TODO: remove in syft v2.0
	newSimplePackageTaskFactory(php.NewPeclCataloger, pkgcataloging.DeprecatedTag),                        //nolint:staticcheck // TODO: remove in syft v2.0
	newSimplePackageTaskFactory(nix.NewStoreCataloger, pkgcataloging.DeprecatedTag),                       //nolint:staticcheck // TODO: remove in syft v2.0
	}
}

@@ -4,7 +4,8 @@ import (
	"context"
	"strings"

	"github.com/anchore/archiver/v3"
	"github.com/mholt/archives"

	"github.com/anchore/syft/internal/log"
	"github.com/anchore/syft/internal/sbomsync"
	"github.com/anchore/syft/syft/cataloging"
@@ -57,9 +58,10 @@ func (c unknownsLabelerTask) finalize(resolver file.Resolver, s *sbom.SBOM) {
	}

	if c.IncludeUnexpandedArchives {
		ctx := context.Background()
		for coords := range s.Artifacts.FileMetadata {
			unarchiver, notArchiveErr := archiver.ByExtension(coords.RealPath)
			if unarchiver != nil && notArchiveErr == nil && !hasPackageReference(coords) {
			format, _, notArchiveErr := archives.Identify(ctx, coords.RealPath, nil)
			if format != nil && notArchiveErr == nil && !hasPackageReference(coords) {
				s.Artifacts.Unknowns[coords] = append(s.Artifacts.Unknowns[coords], "archive not cataloged")
			}
		}

@@ -14,5 +14,5 @@ equivalents).

One can also update the schemas and observe the errors in order to make the necessary updates.
At the time of writing, the cyclonedx.xsd needed modifications to link to the local spdx.xsd,
and also to changes the minOccurs for a license tag to 0. (The json schema does not require
and also to change the minOccurs for a license tag to 0. (The json schema does not require
modification for the generated file to lint properly, but can simply be copy/pasted).

@@ -2,7 +2,7 @@
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
           elementFormDefault="qualified"
           targetNamespace="http://cyclonedx.org/schema/spdx"
           version="1.0-3.24.0">
           version="1.0-3.26.0">

  <xs:simpleType name="licenseId">
    <xs:restriction base="xs:string">

@@ -162,6 +162,11 @@
        <xs:documentation>Any OSI License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="any-OSI-perl-modules">
      <xs:annotation>
        <xs:documentation>Any OSI License - Perl Modules</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Apache-1.0">
      <xs:annotation>
        <xs:documentation>Apache License 1.0</xs:documentation>

@@ -307,6 +312,11 @@
        <xs:documentation>Boehm-Demers-Weiser GC License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Boehm-GC-without-fee">
      <xs:annotation>
        <xs:documentation>Boehm-Demers-Weiser GC License (without fee)</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Borceux">
      <xs:annotation>
        <xs:documentation>Borceux license</xs:documentation>

@@ -812,6 +822,16 @@
        <xs:documentation>Creative Commons Public Domain Dedication and Certification</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="CC-PDM-1.0">
      <xs:annotation>
        <xs:documentation>Creative Commons Public Domain Mark 1.0 Universal</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="CC-SA-1.0">
      <xs:annotation>
        <xs:documentation>Creative Commons Share Alike 1.0 Generic</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="CC0-1.0">
      <xs:annotation>
        <xs:documentation>Creative Commons Zero v1.0 Universal</xs:documentation>

@@ -1062,6 +1082,21 @@
        <xs:documentation>DOC License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="DocBook-Schema">
      <xs:annotation>
        <xs:documentation>DocBook Schema License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="DocBook-Stylesheet">
      <xs:annotation>
        <xs:documentation>DocBook Stylesheet License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="DocBook-XML">
      <xs:annotation>
        <xs:documentation>DocBook XML License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Dotseqn">
      <xs:annotation>
        <xs:documentation>Dotseqn License</xs:documentation>

@@ -1267,6 +1302,11 @@
        <xs:documentation>GD License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="generic-xts">
      <xs:annotation>
        <xs:documentation>Generic XTS License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="GFDL-1.1">
      <xs:annotation>
        <xs:documentation>GNU Free Documentation License v1.1</xs:documentation>

@@ -1527,6 +1567,11 @@
        <xs:documentation>hdparm License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="HIDAPI">
      <xs:annotation>
        <xs:documentation>HIDAPI License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Hippocratic-2.1">
      <xs:annotation>
        <xs:documentation>Hippocratic License 2.1</xs:documentation>

@@ -1617,6 +1662,11 @@
        <xs:documentation>Historical Permission Notice and Disclaimer with MIT disclaimer</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="HPND-Netrek">
      <xs:annotation>
        <xs:documentation>Historical Permission Notice and Disclaimer - Netrek variant</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="HPND-Pbmplus">
      <xs:annotation>
        <xs:documentation>Historical Permission Notice and Disclaimer - Pbmplus variant</xs:documentation>

@@ -1712,6 +1762,11 @@
        <xs:documentation>Inner Net License v2.0</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="InnoSetup">
      <xs:annotation>
        <xs:documentation>Inno Setup License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Intel">
      <xs:annotation>
        <xs:documentation>Intel Open Source License</xs:documentation>

@@ -2052,6 +2107,11 @@
        <xs:documentation>Minpack License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="MIPS">
      <xs:annotation>
        <xs:documentation>MIPS License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="MirOS">
      <xs:annotation>
        <xs:documentation>The MirOS Licence</xs:documentation>

@@ -2072,6 +2132,11 @@
        <xs:documentation>Enlightenment License (e16)</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="MIT-Click">
      <xs:annotation>
        <xs:documentation>MIT Click License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="MIT-CMU">
      <xs:annotation>
        <xs:documentation>CMU License</xs:documentation>

@@ -2772,6 +2837,11 @@
        <xs:documentation>Ruby License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Ruby-pty">
      <xs:annotation>
        <xs:documentation>Ruby pty extension license</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="SAX-PD">
      <xs:annotation>
        <xs:documentation>Sax Public Domain Notice</xs:documentation>

@@ -2807,6 +2877,11 @@
        <xs:documentation>Sendmail License 8.23</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Sendmail-Open-Source-1.1">
      <xs:annotation>
        <xs:documentation>Sendmail Open Source License v1.1</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="SGI-B-1.0">
      <xs:annotation>
        <xs:documentation>SGI Free Software License B v1.0</xs:documentation>

@@ -2867,6 +2942,11 @@
        <xs:documentation>Sleepycat License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="SMAIL-GPL">
      <xs:annotation>
        <xs:documentation>SMAIL General Public License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="SMLNJ">
      <xs:annotation>
        <xs:documentation>Standard ML of New Jersey License</xs:documentation>

@@ -3007,6 +3087,11 @@
        <xs:documentation>Transitive Grace Period Public Licence 1.0</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="ThirdEye">
      <xs:annotation>
        <xs:documentation>ThirdEye License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="threeparttable">
      <xs:annotation>
        <xs:documentation>threeparttable License</xs:documentation>

@@ -3037,6 +3122,11 @@
        <xs:documentation>THOR Public License 1.0</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="TrustedQSL">
      <xs:annotation>
        <xs:documentation>TrustedQSL License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="TTWL">
      <xs:annotation>
        <xs:documentation>Text-Tabs+Wrap License</xs:documentation>

@@ -3057,6 +3147,11 @@
        <xs:documentation>Technische Universitaet Berlin License 2.0</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Ubuntu-font-1.0">
      <xs:annotation>
        <xs:documentation>Ubuntu Font Licence v1.0</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="UCAR">
      <xs:annotation>
        <xs:documentation>UCAR License</xs:documentation>

@@ -3172,6 +3267,11 @@
        <xs:documentation>Do What The F*ck You Want To Public License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="wwl">
      <xs:annotation>
        <xs:documentation>WWL License</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="wxWindows">
      <xs:annotation>
        <xs:documentation>wxWindows Library License</xs:documentation>

@@ -3187,6 +3287,11 @@
        <xs:documentation>X11 License Distribution Modification Variant</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="X11-swapped">
      <xs:annotation>
        <xs:documentation>X11 swapped final paragraphs</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Xdebug-1.03">
      <xs:annotation>
        <xs:documentation>Xdebug License v 1.03</xs:documentation>

@@ -3358,6 +3463,11 @@
        <xs:documentation>Bootloader Distribution Exception</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="CGAL-linking-exception">
      <xs:annotation>
        <xs:documentation>CGAL Linking Exception</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Classpath-exception-2.0">
      <xs:annotation>
        <xs:documentation>Classpath exception 2.0</xs:documentation>

@@ -3383,6 +3493,11 @@
        <xs:documentation>eCos exception 2.0</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="erlang-otp-linking-exception">
      <xs:annotation>
        <xs:documentation>Erlang/OTP Linking Exception</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Fawkes-Runtime-exception">
      <xs:annotation>
        <xs:documentation>Fawkes Runtime Exception</xs:documentation>

@@ -3425,7 +3540,7 @@
    </xs:enumeration>
    <xs:enumeration value="Gmsh-exception">
      <xs:annotation>
        <xs:documentation>Gmsh exception></xs:documentation>
        <xs:documentation>Gmsh exception</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="GNAT-exception">

@@ -3448,6 +3563,11 @@
        <xs:documentation>GNU JavaMail exception</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="GPL-3.0-389-ds-base-exception">
      <xs:annotation>
        <xs:documentation>GPL-3.0 389 DS Base Exception</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="GPL-3.0-interface-exception">
      <xs:annotation>
        <xs:documentation>GPL-3.0 Interface Exception</xs:documentation>

@@ -3478,11 +3598,21 @@
        <xs:documentation>GStreamer Exception (2008)</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="harbour-exception">
      <xs:annotation>
        <xs:documentation>harbour exception</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="i2p-gpl-java-exception">
      <xs:annotation>
        <xs:documentation>i2p GPL+Java Exception</xs:documentation>
      </xs:annotation>
    </xs:enumeration>
    <xs:enumeration value="Independent-modules-exception">
<xs:annotation>
|
||||
<xs:documentation>Independent Module Linking exception</xs:documentation>
|
||||
</xs:annotation>
|
||||
</xs:enumeration>
|
||||
<xs:enumeration value="KiCad-libraries-exception">
|
||||
<xs:annotation>
|
||||
<xs:documentation>KiCad Libraries Exception</xs:documentation>
|
||||
@ -3528,6 +3658,11 @@
|
||||
<xs:documentation>Macros and Inline Functions Exception</xs:documentation>
|
||||
</xs:annotation>
|
||||
</xs:enumeration>
|
||||
<xs:enumeration value="mxml-exception">
|
||||
<xs:annotation>
|
||||
<xs:documentation>mxml Exception</xs:documentation>
|
||||
</xs:annotation>
|
||||
</xs:enumeration>
|
||||
<xs:enumeration value="Nokia-Qt-exception-1.1">
|
||||
<xs:annotation>
|
||||
<xs:documentation>Nokia Qt LGPL exception 1.1</xs:documentation>
|
||||
@ -3583,6 +3718,11 @@
|
||||
<xs:documentation>Qwt exception 1.0</xs:documentation>
|
||||
</xs:annotation>
|
||||
</xs:enumeration>
|
||||
<xs:enumeration value="romic-exception">
|
||||
<xs:annotation>
|
||||
<xs:documentation>Romic Exception</xs:documentation>
|
||||
</xs:annotation>
|
||||
</xs:enumeration>
|
||||
<xs:enumeration value="RRDtool-FLOSS-exception-2.0">
|
||||
<xs:annotation>
|
||||
<xs:documentation>RRDtool FLOSS exception 2.0</xs:documentation>
|
||||
|
||||
New files (diffs suppressed because they are too large):

schema/json/schema-16.0.37.json (3176 lines)
schema/json/schema-16.0.38.json (3319 lines)
schema/json/schema-16.0.39.json (3345 lines)
schema/json/schema-16.0.40.json (3979 lines)
schema/json/schema-16.0.41.json (4011 lines)
schema/json/schema-16.0.42.json (4148 lines)
schema/json/schema-16.0.43.json (4193 lines)
schema/json/schema-16.1.0.json (4258 lines)

One further file diff was also suppressed because it is too large.
@@ -3,6 +3,7 @@ package executable

import (
	"debug/macho"

+	"github.com/anchore/syft/internal"
	"github.com/anchore/syft/syft/file"
	"github.com/anchore/syft/syft/internal/unionreader"
)

@@ -19,20 +20,38 @@ const (

func findMachoFeatures(data *file.Executable, reader unionreader.UnionReader) error {
	// TODO: support security features

-	// TODO: support multi-architecture binaries
-	f, err := macho.NewFile(reader)
+	// a universal binary may have multiple architectures, so we need to check each one
+	readers, err := unionreader.GetReaders(reader)
	if err != nil {
		return err
	}

-	libs, err := f.ImportedLibraries()
-	if err != nil {
-		return err
-	}
-	data.ImportedLibraries = libs
+	var libs []string
+	for _, r := range readers {
+		f, err := macho.NewFile(r)
+		if err != nil {
+			return err
+		}
+
+		rLibs, err := f.ImportedLibraries()
+		if err != nil {
+			return err
+		}
+		libs = append(libs, rLibs...)
+
+		// TODO handle only some having entrypoints/exports? If that is even practical
+		// only check for entrypoint if we don't already have one
+		if !data.HasEntrypoint {
+			data.HasEntrypoint = machoHasEntrypoint(f)
+		}
+		// only check for exports if we don't already have them
+		if !data.HasExports {
+			data.HasExports = machoHasExports(f)
+		}
+	}
+
+	// de-duplicate libraries
+	data.ImportedLibraries = internal.NewSet(libs...).ToSlice()

	return nil
}
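Context for the change above: a universal ("fat") Mach-O binary packs one complete Mach-O image per architecture behind a fat header, so every slice must be inspected. The standalone sketch below is not syft code; it shows the same thin-vs-fat branching that unionreader.GetReaders hides from the loop above, using only Go's standard library (the path to inspect comes from argv).

package main

import (
	"debug/macho"
	"fmt"
	"os"
)

func main() {
	f, err := os.Open(os.Args[1])
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// try the fat (multi-architecture) header first
	if fat, err := macho.NewFatFile(f); err == nil {
		for _, arch := range fat.Arches {
			libs, _ := arch.ImportedLibraries()
			fmt.Printf("cpu=%v libs=%v\n", arch.Cpu, libs)
		}
		return
	}

	// fall back to a single-architecture (thin) Mach-O file
	m, err := macho.NewFile(f)
	if err != nil {
		panic(err)
	}
	libs, _ := m.ImportedLibraries()
	fmt.Printf("cpu=%v libs=%v\n", m.Cpu, libs)
}

Run against a fixture such as bin/hello_mac_universal, this should print one line per architecture slice.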
@@ -9,6 +9,7 @@ import (

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

+	"github.com/anchore/syft/syft/file"
	"github.com/anchore/syft/syft/internal/unionreader"
)

@@ -83,3 +84,39 @@ func Test_machoHasExports(t *testing.T) {
		})
	}
}
+
+func Test_machoUniversal(t *testing.T) {
+	readerForFixture := func(t *testing.T, fixture string) unionreader.UnionReader {
+		t.Helper()
+		f, err := os.Open(filepath.Join("test-fixtures/shared-info", fixture))
+		require.NoError(t, err)
+		return f
+	}
+
+	tests := []struct {
+		name    string
+		fixture string
+		want    file.Executable
+	}{
+		{
+			name:    "universal lib",
+			fixture: "bin/libhello_universal.dylib",
+			want:    file.Executable{HasExports: true, HasEntrypoint: false},
+		},
+		{
+			name:    "universal application",
+			fixture: "bin/hello_mac_universal",
+			want:    file.Executable{HasExports: false, HasEntrypoint: true},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var data file.Executable
+			err := findMachoFeatures(&data, readerForFixture(t, tt.fixture))
+			require.NoError(t, err)
+
+			assert.Equal(t, tt.want.HasEntrypoint, data.HasEntrypoint)
+			assert.Equal(t, tt.want.HasExports, data.HasExports)
+		})
+	}
+}
@@ -2,13 +2,13 @@

BIN=../../bin

-all: $(BIN)/hello_linux $(BIN)/hello.exe $(BIN)/hello_mac
+all: $(BIN)/hello_linux $(BIN)/hello.exe $(BIN)/hello_mac $(BIN)/hello_mac_universal

linux: $(BIN)/libhello.so

windows: $(BIN)/libhello.dll

-mac: $(BIN)/libhello.dylib
+mac: $(BIN)/libhello.dylib $(BIN)/hello_mac_universal

$(BIN)/hello_linux:
	gcc hello.c -o $(BIN)/hello_linux

@@ -19,5 +19,8 @@ $(BIN)/hello.exe:

$(BIN)/hello_mac:
	o64-clang hello.c -o $(BIN)/hello_mac

+$(BIN)/hello_mac_universal:
+	o64-clang -arch arm64 -arch x86_64 hello.c -o $(BIN)/hello_mac_universal

clean:
-	rm -f $(BIN)/hello_linux $(BIN)/hello.exe $(BIN)/hello_mac
+	rm -f $(BIN)/hello_linux $(BIN)/hello.exe $(BIN)/hello_mac $(BIN)/hello_mac_universal
@@ -2,13 +2,13 @@

BIN=../../bin

-all: $(BIN)/libhello.so $(BIN)/libhello.dll $(BIN)/libhello.dylib
+all: $(BIN)/libhello.so $(BIN)/libhello.dll $(BIN)/libhello.dylib $(BIN)/libhello_universal.dylib

linux: $(BIN)/libhello.so

windows: $(BIN)/libhello.dll

-mac: $(BIN)/libhello.dylib
+mac: $(BIN)/libhello.dylib $(BIN)/libhello_universal.dylib

$(BIN)/libhello.so:
	gcc -shared -fPIC -o $(BIN)/libhello.so hello.c

@@ -19,5 +19,8 @@ $(BIN)/libhello.dll:

$(BIN)/libhello.dylib:
	o64-clang -dynamiclib -o $(BIN)/libhello.dylib hello.c

+$(BIN)/libhello_universal.dylib:
+	o64-clang -dynamiclib -arch arm64 -arch x86_64 hello.c -o $(BIN)/libhello_universal.dylib

clean:
-	rm -f $(BIN)/libhello.so $(BIN)/hello.dll $(BIN)/libhello.dylib $(BIN)/libhello.a
+	rm -f $(BIN)/libhello.so $(BIN)/hello.dll $(BIN)/libhello.dylib $(BIN)/libhello.a $(BIN)/libhello_universal.dylib
@@ -7,6 +7,7 @@ import (
	"github.com/scylladb/go-set/strset"
)

+// CoordinateSet provides a unique collection of Coordinates with set operations.
type CoordinateSet struct {
	set map[Coordinates]struct{}
}
@@ -9,8 +9,11 @@ import (

// Coordinates contains the minimal information needed to describe how to find a file within any possible source object (e.g. image and directory sources)
type Coordinates struct {
-	RealPath     string `json:"path" cyclonedx:"path"`                 // The path where all path ancestors have no hardlinks / symlinks
-	FileSystemID string `json:"layerID,omitempty" cyclonedx:"layerID"` // An ID representing the filesystem. For container images, this is a layer digest. For directories or a root filesystem, this is blank.
+	// RealPath is the canonical absolute form of the path accessed (all symbolic links have been followed and relative path components like '.' and '..' have been removed).
+	RealPath string `json:"path" cyclonedx:"path"`
+
+	// FileSystemID is an ID representing an entire filesystem. For container images, this is a layer digest. For directories or a root filesystem, this is blank.
+	FileSystemID string `json:"layerID,omitempty" cyclonedx:"layerID"`
}

func NewCoordinates(realPath, fsID string) Coordinates {
@@ -1,6 +1,10 @@
package file

+// Digest represents a cryptographic hash of file contents.
type Digest struct {
+	// Algorithm specifies the hash algorithm used (e.g., "sha256", "md5").
	Algorithm string `json:"algorithm"`
+
+	// Value is the hexadecimal string representation of the hash.
	Value string `json:"value"`
}
@@ -1,39 +1,59 @@
package file

type (
+	// ExecutableFormat represents the binary executable format type.
	ExecutableFormat string

+	// RelocationReadOnly indicates the RELRO security protection level applied to an ELF binary.
	RelocationReadOnly string
)

const (
-	ELF   ExecutableFormat = "elf"
-	MachO ExecutableFormat = "macho"
-	PE    ExecutableFormat = "pe"
+	ELF   ExecutableFormat = "elf"   // Executable and Linkable Format used on Unix-like systems
+	MachO ExecutableFormat = "macho" // Mach object file format used on macOS and iOS
+	PE    ExecutableFormat = "pe"    // Portable Executable format used on Windows

-	RelocationReadOnlyNone    RelocationReadOnly = "none"
-	RelocationReadOnlyPartial RelocationReadOnly = "partial"
-	RelocationReadOnlyFull    RelocationReadOnly = "full"
+	RelocationReadOnlyNone    RelocationReadOnly = "none"    // no RELRO protection
+	RelocationReadOnlyPartial RelocationReadOnly = "partial" // partial RELRO protection
+	RelocationReadOnlyFull    RelocationReadOnly = "full"    // full RELRO protection
)

+// Executable contains metadata about binary files and their security features.
type Executable struct {
	// Format denotes either ELF, Mach-O, or PE
	Format ExecutableFormat `json:"format" yaml:"format" mapstructure:"format"`

+	// HasExports indicates whether the binary exports symbols.
	HasExports bool `json:"hasExports" yaml:"hasExports" mapstructure:"hasExports"`

+	// HasEntrypoint indicates whether the binary has an entry point function.
	HasEntrypoint bool `json:"hasEntrypoint" yaml:"hasEntrypoint" mapstructure:"hasEntrypoint"`

+	// ImportedLibraries lists the shared libraries required by this executable.
	ImportedLibraries []string `json:"importedLibraries" yaml:"importedLibraries" mapstructure:"importedLibraries"`

+	// ELFSecurityFeatures contains ELF-specific security hardening information when Format is ELF.
	ELFSecurityFeatures *ELFSecurityFeatures `json:"elfSecurityFeatures,omitempty" yaml:"elfSecurityFeatures" mapstructure:"elfSecurityFeatures"`
}

+// ELFSecurityFeatures captures security hardening and protection mechanisms in ELF binaries.
type ELFSecurityFeatures struct {
+	// SymbolTableStripped indicates whether debugging symbols have been removed.
	SymbolTableStripped bool `json:"symbolTableStripped" yaml:"symbolTableStripped" mapstructure:"symbolTableStripped"`

	// classic protections

+	// StackCanary indicates whether stack smashing protection is enabled.
	StackCanary *bool `json:"stackCanary,omitempty" yaml:"stackCanary" mapstructure:"stackCanary"`

+	// NoExecutable indicates whether NX (no-execute) protection is enabled for the stack.
	NoExecutable bool `json:"nx" yaml:"nx" mapstructure:"nx"`

+	// RelocationReadOnly indicates the RELRO protection level.
	RelocationReadOnly RelocationReadOnly `json:"relRO" yaml:"relRO" mapstructure:"relRO"`

+	// PositionIndependentExecutable indicates whether the binary is compiled as PIE.
	PositionIndependentExecutable bool `json:"pie" yaml:"pie" mapstructure:"pie"`

+	// DynamicSharedObject indicates whether the binary is a shared library.
	DynamicSharedObject bool `json:"dso" yaml:"dso" mapstructure:"dso"`

	// LlvmSafeStack represents a compiler-based security mechanism that separates the stack into a safe stack for storing return addresses and other critical data, and an unsafe stack for everything else, to mitigate stack-based memory corruption errors
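Several of these ELF fields boil down to simple header checks. The sketch below is a rough standalone illustration using only Go's standard library, not the syft implementation: ET_DYN is the type shared by PIE executables and shared objects, and a PT_GNU_STACK program header without the execute flag is what an NX-enabled stack looks like. The path to inspect comes from argv.

package main

import (
	"debug/elf"
	"fmt"
	"os"
)

func main() {
	f, err := elf.Open(os.Args[1])
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// ET_DYN covers both PIE executables and shared objects; telling them
	// apart requires further checks (e.g. for an interpreter segment).
	fmt.Println("PIE/DSO candidate:", f.Type == elf.ET_DYN)

	// NX: the GNU stack segment must not carry the execute flag.
	nx := false
	for _, p := range f.Progs {
		if p.Type == elf.PT_GNU_STACK {
			nx = p.Flags&elf.PF_X == 0
		}
	}
	fmt.Println("NX stack:", nx)
}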
@@ -5,17 +5,32 @@ import (
	"github.com/anchore/syft/syft/license"
)

+// License represents license information discovered within a file.
type License struct {
+	// Value is the raw license string as found in the file.
	Value string

+	// SPDXExpression is the parsed SPDX license expression if available.
	SPDXExpression string

+	// Type categorizes how the license was determined (e.g., declared, concluded -- following the same semantics as SPDX).
	Type license.Type
-	LicenseEvidence *LicenseEvidence // evidence from license classifier
+
+	LicenseEvidence *LicenseEvidence
+
+	// Contents optionally stores the full license text.
	Contents string `hash:"ignore"`
}

+// LicenseEvidence contains details from license classifier analysis.
type LicenseEvidence struct {
+	// Confidence is a score indicating certainty of the license match.
	Confidence int
+
+	// Offset is the byte position where the license text begins in the file.
	Offset int
+
+	// Extent is the length in bytes of the matched license text.
	Extent int
}
@@ -27,18 +27,24 @@ type Location struct {
	LocationMetadata `cyclonedx:""`
}

+// LocationData contains the core identifying information for a file location.
type LocationData struct {
	Coordinates `cyclonedx:""` // Empty string here means there is no intermediate property name, e.g. syft:locations:0:path without "coordinates"
	// note: it is IMPORTANT to ignore anything but the coordinates for a Location when considering the ID (hash value)
	// since the coordinates are the minimally correct ID for a location (symlinks should not come into play)
-	AccessPath string         `hash:"ignore" json:"accessPath"` // The path to the file which may or may not have hardlinks / symlinks
-	ref        file.Reference `hash:"ignore"`                   // The file reference relative to the stereoscope.FileCatalog that has more information about this location.
+
+	// AccessPath is the path used to retrieve file contents (which may or may not have hardlinks / symlinks in the path)
+	AccessPath string `hash:"ignore" json:"accessPath"`
+
+	// ref is the stereoscope file reference relative to the stereoscope.FileCatalog that has more information about this location.
+	ref file.Reference `hash:"ignore"`
}

func (l LocationData) Reference() file.Reference {
	return l.ref
}

+// LocationMetadata provides additional contextual information about a file location.
type LocationMetadata struct {
	Annotations map[string]string `json:"annotations,omitempty"` // Arbitrary key-value pairs that can be used to annotate a location
}
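A quick sketch of the AccessPath / RealPath split documented above, assuming the package's NewVirtualLocation constructor (present in syft/file at the time of this change): a file reached through a symlink keeps the symlink path as AccessPath, while the embedded Coordinates.RealPath holds the resolved target.

package main

import (
	"fmt"

	"github.com/anchore/syft/syft/file"
)

func main() {
	// hypothetical symlink: /usr/lib/libssl.so -> /usr/lib/libssl.so.3
	loc := file.NewVirtualLocation("/usr/lib/libssl.so.3", "/usr/lib/libssl.so")
	fmt.Println("real:", loc.RealPath)     // the resolved target path
	fmt.Println("access:", loc.AccessPath) // the path as it was reached
}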
@@ -2,6 +2,7 @@ package file

import "io"

+// LocationReadCloser combines a Location with a ReadCloser for accessing file content with location metadata.
type LocationReadCloser struct {
	Location
	io.ReadCloser
@@ -9,6 +9,7 @@ import (
	"github.com/anchore/syft/internal/log"
)

+// LocationSet provides a unique collection of Locations with metadata and set operations.
type LocationSet struct {
	set map[LocationData]LocationMetadata
}
@@ -8,6 +8,7 @@ import (

var locationSorterWithoutLayers = LocationSorter(nil)

+// Locations is a sortable slice of Location values.
type Locations []Location

func (l Locations) Len() int {
@@ -18,6 +18,7 @@ type ContentResolver interface {
	FileContentsByLocation(Location) (io.ReadCloser, error)
}

+// MetadataResolver provides file metadata lookup by location.
type MetadataResolver interface {
	FileMetadataByLocation(Location) (Metadata, error)
}

@@ -51,6 +52,7 @@ type PathResolver interface {
	RelativeFileByPath(_ Location, path string) *Location
}

+// LocationResolver provides iteration over all file locations in a source.
type LocationResolver interface {
	// AllLocations returns a channel of all file references from the underlying source.
	// The implementation for this may vary, however, generally the following considerations should be made:

@@ -59,6 +61,7 @@ type LocationResolver interface {
	AllLocations(ctx context.Context) <-chan Location
}

+// WritableResolver extends Resolver with the ability to write file content.
type WritableResolver interface {
	Resolver
@@ -4,12 +4,24 @@ import (
	"fmt"
)

+// SearchResult represents a match found during content scanning, such as secret detection.
type SearchResult struct {
+	// Classification identifies the type or category of the matched content.
	Classification string `json:"classification"`
+
+	// LineNumber is the 1-indexed line number where the match was found.
	LineNumber int64 `json:"lineNumber"`
+
+	// LineOffset is the character offset from the start of the line where the match begins.
	LineOffset int64 `json:"lineOffset"`
+
+	// SeekPosition is the absolute byte offset from the start of the file.
	SeekPosition int64 `json:"seekPosition"`
+
+	// Length is the size in bytes of the matched content.
	Length int64 `json:"length"`
+
+	// Value optionally contains the actual matched content.
	Value string `json:"value,omitempty"`
}
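To make the positional fields concrete, a small self-contained sketch (illustrative only, not the secrets cataloger's actual implementation) derives the 1-indexed LineNumber and the LineOffset from an absolute SeekPosition:

package main

import (
	"fmt"
	"strings"
)

// positions relates the fields documented above: given a match at absolute
// byte offset seek, the line number counts newlines before it (1-indexed)
// and the line offset counts bytes from the start of that line.
func positions(content string, seek int) (lineNumber, lineOffset int64) {
	prefix := content[:seek]
	lineNumber = int64(strings.Count(prefix, "\n")) + 1
	lineOffset = int64(seek - (strings.LastIndex(prefix, "\n") + 1))
	return
}

func main() {
	content := "first\nsecond SECRET here\n"
	seek := strings.Index(content, "SECRET")
	ln, lo := positions(content, seek)
	fmt.Println(ln, lo, seek) // 2 7 13
}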
@@ -1,9 +1,10 @@
package file

const (
-	NoFilesSelection             Selection = "none"
-	FilesOwnedByPackageSelection Selection = "owned-by-package"
-	AllFilesSelection            Selection = "all"
+	NoFilesSelection             Selection = "none"             // no files are selected
+	FilesOwnedByPackageSelection Selection = "owned-by-package" // only files owned by packages are selected
+	AllFilesSelection            Selection = "all"              // all files are selected
)

+// Selection defines which files should be included during cataloging operations.
type Selection string
@@ -48,7 +48,7 @@ func ToFormatModel(s sbom.SBOM) *cyclonedx.BOM {
	packages := s.Artifacts.Packages.Sorted()
	components := make([]cyclonedx.Component, len(packages))
	for i, p := range packages {
-		components[i] = helpers.EncodeComponent(p, locationSorter)
+		components[i] = helpers.EncodeComponent(p, s.Source.Supplier, locationSorter)
	}
	components = append(components, toOSComponent(s.Artifacts.LinuxDistribution)...)

@@ -220,11 +220,22 @@ func toBomDescriptor(name, version string, srcMetadata source.Description) *cycl
				},
			},
		},
+		Supplier:   toBomSupplier(srcMetadata),
		Properties: toBomProperties(srcMetadata),
		Component:  toBomDescriptorComponent(srcMetadata),
	}
}

+func toBomSupplier(srcMetadata source.Description) *cyclonedx.OrganizationalEntity {
+	if srcMetadata.Supplier != "" {
+		return &cyclonedx.OrganizationalEntity{
+			Name: srcMetadata.Supplier,
+		}
+	}
+
+	return nil
+}

// used to indicate that a relationship listed under the syft artifact package can be represented as a cyclonedx dependency.
// NOTE: CycloneDX provides the ability to describe components and their dependency on other components.
// The dependency graph is capable of representing both direct and transitive relationships.

@@ -325,6 +336,7 @@ func toBomDescriptorComponent(srcMetadata source.Description) *cyclonedx.Compone
			Type:    cyclonedx.ComponentTypeContainer,
			Name:    name,
			Version: version,
+			Supplier: toBomSupplier(srcMetadata),
		}
	case source.DirectoryMetadata:
		if name == "" {

@@ -340,6 +352,7 @@ func toBomDescriptorComponent(srcMetadata source.Description) *cyclonedx.Compone
			Type:    cyclonedx.ComponentTypeFile,
			Name:    name,
			Version: version,
+			Supplier: toBomSupplier(srcMetadata),
		}
	case source.FileMetadata:
		if name == "" {

@@ -355,6 +368,7 @@ func toBomDescriptorComponent(srcMetadata source.Description) *cyclonedx.Compone
			Type:    cyclonedx.ComponentTypeFile,
			Name:    name,
			Version: version,
+			Supplier: toBomSupplier(srcMetadata),
		}
	}
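The new toBomSupplier helper is a small total mapping, restated standalone below (illustrative only, using the cyclonedx-go types the encoder already imports): an empty supplier yields nil so the optional field is omitted from the BOM, while a non-empty one becomes an OrganizationalEntity attached to both the BOM metadata and the root component.

package main

import (
	"fmt"

	"github.com/CycloneDX/cyclonedx-go"
)

// supplierEntity restates the toBomSupplier mapping from this diff.
func supplierEntity(supplier string) *cyclonedx.OrganizationalEntity {
	if supplier == "" {
		return nil // field omitted from the BOM when no supplier is known
	}
	return &cyclonedx.OrganizationalEntity{Name: supplier}
}

func main() {
	fmt.Println(supplierEntity(""))                  // <nil>
	fmt.Println(supplierEntity("optional-supplier")) // entity carrying the name into metadata.supplier
}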
@@ -389,7 +389,69 @@ func Test_toBomDescriptor(t *testing.T) {
							Name:  "syft:image:labels:key1",
							Value: "value1",
						},
					}},
				},
			},
		},
+		{
+			name: "with optional supplier is on the root component and bom metadata",
+			args: args{
+				name:    "test-image",
+				version: "1.0.0",
+				srcMetadata: source.Description{
+					Name:     "test-image",
+					Version:  "1.0.0",
+					Supplier: "optional-supplier",
+					Metadata: source.ImageMetadata{},
+				},
+			},
+			want: &cyclonedx.Metadata{
+				Timestamp:  "",
+				Lifecycles: nil,
+				Tools: &cyclonedx.ToolsChoice{
+					Components: &[]cyclonedx.Component{
+						{
+							Type:    cyclonedx.ComponentTypeApplication,
+							Author:  "anchore",
+							Name:    "test-image",
+							Version: "1.0.0",
+						},
+					},
+				},
+				Authors: nil,
+				Component: &cyclonedx.Component{
+					BOMRef:   "",
+					MIMEType: "",
+					Type:     "container",
+					Supplier: &cyclonedx.OrganizationalEntity{
+						Name: "optional-supplier",
+					},
+					Author:             "",
+					Publisher:          "",
+					Group:              "",
+					Name:               "test-image",
+					Version:            "1.0.0",
+					Description:        "",
+					Scope:              "",
+					Hashes:             nil,
+					Licenses:           nil,
+					Copyright:          "",
+					CPE:                "",
+					PackageURL:         "",
+					SWID:               nil,
+					Modified:           nil,
+					Pedigree:           nil,
+					ExternalReferences: nil,
+					Properties:         nil,
+					Components:         nil,
+					Evidence:           nil,
+					ReleaseNotes:       nil,
+				},
+				Manufacture: nil,
+				Supplier: &cyclonedx.OrganizationalEntity{
+					Name: "optional-supplier",
+				},
+				Licenses: nil,
+			},
+		},
	}
	for _, tt := range tests {
Some files were not shown because too many files have changed in this diff.