Add ability to scan snaps (as a source) (#3929)

This commit is contained in:
Alex Goodman 2025-06-25 16:53:35 -04:00 committed by GitHub
parent 4eb8ba4575
commit 2bda086423
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
63 changed files with 5060 additions and 175 deletions

View File

@ -0,0 +1,8 @@
[TestHandler_handlePullSourceStarted/snap_download_in_progress - 1]
⠋ Downloading snap file... ━━━━━━━━━━━━━━━━━━━━ example-app_1.0_amd64.snap
---
[TestHandler_handlePullSourceStarted/snap_download_complete - 1]
✔ Snap downloaded successfully example-app_1.0_amd64.snap
---

View File

@ -26,7 +26,7 @@ func TestHandler_handleCatalogerTaskStarted(t *testing.T) {
{
name: "cataloging task in progress",
eventFn: func(t *testing.T) partybus.Event {
value := &monitor.CatalogerTaskProgress{
value := &monitor.TaskProgress{
AtomicStage: progress.NewAtomicStage("some stage"),
Manual: progress.NewManual(100),
}
@ -48,7 +48,7 @@ func TestHandler_handleCatalogerTaskStarted(t *testing.T) {
{
name: "cataloging sub task in progress",
eventFn: func(t *testing.T) partybus.Event {
value := &monitor.CatalogerTaskProgress{
value := &monitor.TaskProgress{
AtomicStage: progress.NewAtomicStage("some stage"),
Manual: progress.NewManual(100),
}
@ -71,7 +71,7 @@ func TestHandler_handleCatalogerTaskStarted(t *testing.T) {
{
name: "cataloging sub task complete",
eventFn: func(t *testing.T) partybus.Event {
value := &monitor.CatalogerTaskProgress{
value := &monitor.TaskProgress{
AtomicStage: progress.NewAtomicStage("some stage"),
Manual: progress.NewManual(100),
}
@ -94,7 +94,7 @@ func TestHandler_handleCatalogerTaskStarted(t *testing.T) {
{
name: "cataloging sub task complete -- hide stage",
eventFn: func(t *testing.T) partybus.Event {
value := &monitor.CatalogerTaskProgress{
value := &monitor.TaskProgress{
AtomicStage: progress.NewAtomicStage("some stage"),
Manual: progress.NewManual(100),
}
@ -117,7 +117,7 @@ func TestHandler_handleCatalogerTaskStarted(t *testing.T) {
{
name: "cataloging sub task complete with removal",
eventFn: func(t *testing.T) partybus.Event {
value := &monitor.CatalogerTaskProgress{
value := &monitor.TaskProgress{
AtomicStage: progress.NewAtomicStage("some stage"),
Manual: progress.NewManual(100),
}
@ -162,7 +162,7 @@ func TestHandler_handleCatalogerTaskStarted(t *testing.T) {
}
// note: this line / event is not under test, only needed to show a sub status
kickoffEvent := &monitor.CatalogerTaskProgress{
kickoffEvent := &monitor.TaskProgress{
AtomicStage: progress.NewAtomicStage(""),
Manual: progress.NewManual(-1),
}

View File

@ -0,0 +1,37 @@
package ui
import (
tea "github.com/charmbracelet/bubbletea"
"github.com/wagoodman/go-partybus"
"github.com/anchore/bubbly/bubbles/taskprogress"
"github.com/anchore/syft/internal/log"
syftEventParsers "github.com/anchore/syft/syft/event/parsers"
)
// handlePullSourceStarted reacts to a PullSourceStarted event by constructing a
// task-progress model that tracks the source download. Returns nil when the
// event payload cannot be parsed (the failure is logged at debug level).
func (m *Handler) handlePullSourceStarted(e partybus.Event) []tea.Model {
	progressable, info, err := syftEventParsers.ParsePullSourceStarted(e)
	if err != nil {
		log.WithFields("error", err).Debug("unable to parse event")
		return nil
	}

	title := taskprogress.Title{
		Default: info.Title.Default,
		Running: info.Title.WhileRunning,
		Success: info.Title.OnSuccess,
	}

	model := m.newTaskProgress(title, taskprogress.WithStagedProgressable(progressable))

	// visibility of the task after completion is driven by the event metadata;
	// the raw progress bar is always hidden once the download succeeds
	model.HideOnSuccess = info.HideOnSuccess
	model.HideStageOnSuccess = info.HideStageOnSuccess
	model.HideProgressOnSuccess = true

	if info.Context != "" {
		model.Context = []string{info.Context}
	}

	return []tea.Model{model}
}

View File

@ -0,0 +1,121 @@
package ui
import (
"testing"
"time"
tea "github.com/charmbracelet/bubbletea"
"github.com/gkampitakis/go-snaps/snaps"
"github.com/stretchr/testify/require"
"github.com/wagoodman/go-partybus"
"github.com/wagoodman/go-progress"
"github.com/anchore/bubbly/bubbles/taskprogress"
"github.com/anchore/syft/syft/event"
"github.com/anchore/syft/syft/event/monitor"
)
// TestHandler_handlePullSourceStarted exercises the pull-source event handler by
// rendering an in-progress and a completed snap download and comparing the
// output against stored snapshots.
func TestHandler_handlePullSourceStarted(t *testing.T) {
	tests := []struct {
		name       string
		eventFn    func(*testing.T) partybus.Event
		iterations int
	}{
		{
			name: "snap download in progress",
			eventFn: func(t *testing.T) partybus.Event {
				stage := progress.NewAtomicStage("")
				manual := progress.NewManual(0)
				manual.SetTotal(1000000) // 1MB file
				manual.Set(250000)       // 25% downloaded

				taskProg := &monitor.TaskProgress{
					AtomicStage: stage,
					Manual:      manual,
				}

				genericTask := monitor.GenericTask{
					Title: monitor.Title{
						Default:      "Downloading snap",
						WhileRunning: "Downloading snap file...",
						OnSuccess:    "Snap downloaded",
					},
					Context:            "example-app_1.0_amd64.snap",
					HideOnSuccess:      false,
					HideStageOnSuccess: true,
					ID:                 "snap-download-123",
				}

				return partybus.Event{
					Type:   event.PullSourceStarted,
					Source: genericTask,
					Value:  taskProg,
				}
			},
			iterations: 5,
		},
		{
			name: "snap download complete",
			eventFn: func(t *testing.T) partybus.Event {
				stage := progress.NewAtomicStage("")
				manual := progress.NewManual(0)
				manual.SetTotal(1000000) // 1MB file
				manual.Set(1000000)      // 100% downloaded
				manual.SetCompleted()

				taskProg := &monitor.TaskProgress{
					AtomicStage: stage,
					Manual:      manual,
				}

				genericTask := monitor.GenericTask{
					Title: monitor.Title{
						Default:      "Downloading snap",
						WhileRunning: "Downloading snap file...",
						OnSuccess:    "Snap downloaded successfully",
					},
					Context:            "example-app_1.0_amd64.snap",
					HideOnSuccess:      false,
					HideStageOnSuccess: true,
					ID:                 "snap-download-123",
				}

				return partybus.Event{
					Type:   event.PullSourceStarted,
					Source: genericTask,
					Value:  taskProg,
				}
			},
			iterations: 3,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// named "e" (not "event") to avoid shadowing the imported event package
			e := tt.eventFn(t)
			handler := New(DefaultHandlerConfig())
			handler.WindowSize = tea.WindowSizeMsg{
				Width:  100,
				Height: 80,
			}

			models := handler.handlePullSourceStarted(e)
			require.Len(t, models, 1)
			model := models[0]

			tsk, ok := model.(taskprogress.Model)
			require.True(t, ok)

			gotModel := runModel(t, tsk, tt.iterations, taskprogress.TickMsg{
				Time:     time.Now(),
				Sequence: tsk.Sequence(),
				ID:       tsk.ID(),
			})
			got := gotModel.View()
			t.Log(got)
			snaps.MatchSnapshot(t, got)
		})
	}
}

View File

@ -57,6 +57,7 @@ func New(cfg HandlerConfig) *Handler {
stereoscopeEvent.FetchImage: simpleHandler(h.handleFetchImage),
syftEvent.FileIndexingStarted: simpleHandler(h.handleFileIndexingStarted),
syftEvent.AttestationStarted: simpleHandler(h.handleAttestationStarted),
syftEvent.PullSourceStarted: simpleHandler(h.handlePullSourceStarted),
syftEvent.CatalogerTaskStarted: h.handleCatalogerTaskStarted,
})

View File

@ -44,7 +44,7 @@ func AppClioSetupConfig(id clio.Identification, out io.Writer) *clio.SetupConfig
redact.Set(state.RedactStore)
log.Set(state.Logger)
stereoscope.SetLogger(state.Logger)
stereoscope.SetLogger(state.Logger.Nested("from", "stereoscope"))
return nil
},
).

30
go.mod
View File

@ -35,6 +35,7 @@ require (
github.com/charmbracelet/lipgloss v1.1.0
github.com/dave/jennifer v1.7.1
github.com/deitch/magic v0.0.0-20230404182410-1ff89d7342da
github.com/diskfs/go-diskfs v1.6.1-0.20250601133945-2af1c7ece24c
github.com/distribution/reference v0.6.0
github.com/dustin/go-humanize v1.0.1
github.com/elliotchance/phpserialize v1.4.0
@ -51,6 +52,8 @@ require (
github.com/google/licensecheck v0.3.1
github.com/google/uuid v1.6.0
github.com/gookit/color v1.5.4
github.com/hashicorp/go-cleanhttp v0.5.2
github.com/hashicorp/go-getter v1.7.8
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/hcl/v2 v2.23.0
github.com/iancoleman/strcase v0.3.0
@ -93,6 +96,12 @@ require (
)
require (
cloud.google.com/go v0.116.0 // indirect
cloud.google.com/go/auth v0.9.9 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
cloud.google.com/go/compute/metadata v0.7.0 // indirect
cloud.google.com/go/iam v1.2.2 // indirect
cloud.google.com/go/storage v1.43.0 // indirect
dario.cat/mergo v1.0.1 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
@ -103,14 +112,17 @@ require (
github.com/ProtonMail/go-crypto v1.2.0 // indirect
github.com/STARRY-S/zip v0.2.1 // indirect
github.com/agext/levenshtein v1.2.1 // indirect
github.com/anchore/go-lzo v0.1.0 // indirect
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
github.com/aquasecurity/go-version v0.0.1 // indirect
github.com/atotto/clipboard v0.1.4 // indirect
github.com/aws/aws-sdk-go v1.44.122 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/becheran/wildmatch-go v1.0.0 // indirect
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
github.com/bodgit/plumbing v1.3.0 // indirect
github.com/bodgit/sevenzip v1.6.0 // indirect
github.com/bodgit/windows v1.0.1 // indirect
@ -153,17 +165,23 @@ require (
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-restruct/restruct v1.2.0-alpha // indirect
github.com/goccy/go-yaml v1.18.0 // indirect
github.com/goccy/go-yaml v1.18.0
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e // indirect
github.com/google/s2a-go v0.1.8 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-safetemp v1.0.0 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
@ -180,6 +198,7 @@ require (
github.com/minio/minlz v1.0.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
@ -203,6 +222,7 @@ require (
github.com/pjbgf/sha1cd v0.3.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pkg/profile v1.7.0 // indirect
github.com/pkg/xattr v0.4.9 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.7 // indirect
@ -210,13 +230,14 @@ require (
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/sahilm/fuzzy v0.1.1 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
github.com/skeema/knownhosts v1.3.1 // indirect
github.com/sorairolake/lzip-go v0.3.5 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/spf13/viper v1.20.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/sylabs/sif/v2 v2.21.1 // indirect
github.com/sylabs/squashfs v1.0.6 // indirect
@ -235,6 +256,7 @@ require (
github.com/zclconf/go-cty v1.13.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
@ -243,13 +265,17 @@ require (
go.uber.org/multierr v1.9.0 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/crypto v0.39.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.15.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/term v0.32.0 // indirect
golang.org/x/text v0.26.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.34.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
google.golang.org/api v0.203.0 // indirect
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241113202542-65e8d215514f // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
google.golang.org/grpc v1.67.3 // indirect
google.golang.org/protobuf v1.36.4 // indirect

1021
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -36,8 +36,8 @@ func Notify(message string) {
})
}
func StartCatalogerTask(info monitor.GenericTask, size int64, initialStage string) *monitor.CatalogerTaskProgress {
t := &monitor.CatalogerTaskProgress{
func StartCatalogerTask(info monitor.GenericTask, size int64, initialStage string) *monitor.TaskProgress {
t := &monitor.TaskProgress{
AtomicStage: progress.NewAtomicStage(initialStage),
Manual: progress.NewManual(size),
}
@ -50,3 +50,33 @@ func StartCatalogerTask(info monitor.GenericTask, size int64, initialStage strin
return t
}
// StartPullSourceTask publishes a PullSourceStarted event for the given task
// description and returns the progress object the puller should update while
// downloading (size may be -1 when unknown).
func StartPullSourceTask(info monitor.GenericTask, size int64, initialStage string) *monitor.TaskProgress {
	prog := &monitor.TaskProgress{
		AtomicStage: progress.NewAtomicStage(initialStage),
		Manual:      progress.NewManual(size),
	}

	Publish(partybus.Event{
		Type:   event.PullSourceStarted,
		Source: info,
		Value:  progress.StagedProgressable(prog),
	})

	return prog
}
// StartIndexingFiles publishes a FileIndexingStarted event for the given path and
// returns the progress object the indexer should update (total is -1: unknown size).
func StartIndexingFiles(path string) *monitor.TaskProgress {
	prog := &monitor.TaskProgress{
		AtomicStage: progress.NewAtomicStage(""),
		Manual:      progress.NewManual(-1),
	}

	Publish(partybus.Event{
		Type:   event.FileIndexingStarted,
		Source: path,
		Value:  progress.StagedProgressable(prog),
	})

	return prog
}

154
internal/file/getter.go Normal file
View File

@ -0,0 +1,154 @@
package file
import (
"fmt"
"io"
"net/http"
"github.com/hashicorp/go-getter"
"github.com/hashicorp/go-getter/helper/url"
"github.com/wagoodman/go-progress"
"github.com/anchore/clio"
"github.com/anchore/stereoscope/pkg/file"
"github.com/anchore/syft/internal"
)
var (
	// archiveExtensions enumerates every file extension go-getter knows how to
	// decompress (derived from its Decompressors registry at init time).
	archiveExtensions = getterDecompressorNames()

	// ErrNonArchiveSource is returned by GetToDir when an http(s) source does not
	// look like an archive (directory destinations require an archive to unpack).
	ErrNonArchiveSource = fmt.Errorf("non-archive sources are not supported for directory destinations")
)
// Getter abstracts downloading remote or local resources to either a single
// file or a directory destination.
type Getter interface {
	// GetFile downloads the given URL into the given path. The URL must reference a single file.
	GetFile(dst, src string, monitor ...*progress.Manual) error

	// GetToDir downloads the resource found at the `src` URL into the given `dst` directory.
	// The directory must already exist, and the remote resource MUST BE AN ARCHIVE (e.g. `.tar.gz`).
	GetToDir(dst, src string, monitor ...*progress.Manual) error
}
// HashiGoGetter is a Getter implementation backed by hashicorp/go-getter, with a
// configurable HTTP getter (used for both http and https schemes).
type HashiGoGetter struct {
	httpGetter getter.HttpGetter
}
// NewGetter creates and returns a new Getter. Providing an http.Client is optional. If one is provided,
// it will be used for all HTTP(S) getting; otherwise, go-getter's default getters will be used.
func NewGetter(id clio.Identification, httpClient *http.Client) *HashiGoGetter {
	// identify ourselves to remote servers via the application name + version
	userAgent := fmt.Sprintf("%v %v", id.Name, id.Version)

	return &HashiGoGetter{
		httpGetter: getter.HttpGetter{
			Client: httpClient,
			Header: http.Header{
				"User-Agent": []string{userAgent},
			},
		},
	}
}
// GetFile downloads src into the destination file dst. At most one progress
// monitor may be supplied.
func (g HashiGoGetter) GetFile(dst, src string, monitors ...*progress.Manual) error {
	if count := len(monitors); count > 1 {
		return fmt.Errorf("multiple monitors provided, which is not allowed")
	}
	return getterClient(dst, src, false, g.httpGetter, monitors).Get()
}
// GetToDir downloads the archive at src and unpacks it into the existing
// directory dst. At most one progress monitor may be supplied.
func (g HashiGoGetter) GetToDir(dst, src string, monitors ...*progress.Manual) error {
	// though there are multiple getters, only the http/https getter requires extra validation
	if err := validateHTTPSource(src); err != nil {
		return err
	}

	if count := len(monitors); count > 1 {
		return fmt.Errorf("multiple monitors provided, which is not allowed")
	}

	return getterClient(dst, src, true, g.httpGetter, monitors).Get()
}
// validateHTTPSource rejects http(s) sources whose path does not carry a known
// archive extension; non-http(s) sources are passed through untouched.
func validateHTTPSource(src string) error {
	if !internal.HasAnyOfPrefixes(src, "http://", "https://") {
		// not handled by the http getter -- nothing for us to validate
		return nil
	}

	parsed, err := url.Parse(src)
	if err != nil {
		return fmt.Errorf("bad URL provided %q: %w", src, err)
	}

	// only allow for sources with archive extensions
	if internal.HasAnyOfSuffixes(parsed.Path, archiveExtensions...) {
		return nil
	}
	return ErrNonArchiveSource
}
// getterClient assembles a go-getter client for the given source/destination
// pair, wiring the provided http getter into both the http and https schemes.
func getterClient(dst, src string, dir bool, httpGetter getter.HttpGetter, monitors []*progress.Manual) *getter.Client {
	getters := map[string]getter.Getter{
		"http":  &httpGetter,
		"https": &httpGetter,
		// note: these are the default getters from https://github.com/hashicorp/go-getter/blob/v1.5.9/get.go#L68-L74
		// it is possible that other implementations need to account for custom httpclient injection, however,
		// that has not been accounted for at this time.
		"file": new(getter.FileGetter),
		"git":  new(getter.GitGetter),
		"gcs":  new(getter.GCSGetter),
		"hg":   new(getter.HgGetter),
		"s3":   new(getter.S3Getter),
	}

	return &getter.Client{
		Src:     src,
		Dst:     dst,
		Dir:     dir,
		Getters: getters,
		Options: mapToGetterClientOptions(monitors),
	}
}
// withProgress wraps the given monitor in a go-getter client option so that
// download progress is reported through it.
func withProgress(monitor *progress.Manual) func(client *getter.Client) error {
	adapter := &progressAdapter{monitor: monitor}
	return getter.WithProgress(adapter)
}
// mapToGetterClientOptions converts the given progress monitors into go-getter
// client options, always appending a size-capped decompressor set.
func mapToGetterClientOptions(monitors []*progress.Manual) []getter.ClientOption {
	var opts []getter.ClientOption
	for _, m := range monitors {
		opts = append(opts, withProgress(m))
	}

	// derived from https://github.com/hashicorp/go-getter/blob/v2.2.3/decompress.go#L23-L63
	// cap decompressed output at 5GB to guard against decompression bombs
	fileSizeLimit := int64(5 * file.GB)
	decompressors := getter.LimitedDecompressors(0, fileSizeLimit)

	return append(opts, getter.WithDecompressors(decompressors))
}
// readCloser adapts a progress.Reader (which has no Close method) to
// io.ReadCloser so it can be returned from TrackProgress.
type readCloser struct {
	progress.Reader
}

// Close is a no-op; the lifecycle of the underlying stream is managed by go-getter.
func (c *readCloser) Close() error { return nil }
// progressAdapter bridges go-getter's progress-tracking callback to a
// progress.Manual monitor.
type progressAdapter struct {
	monitor *progress.Manual
}

// TrackProgress records the current/total byte counts on the monitor and wraps
// the stream so that subsequent reads continue to advance the monitor.
func (a *progressAdapter) TrackProgress(_ string, currentSize, totalSize int64, stream io.ReadCloser) io.ReadCloser {
	a.monitor.Set(currentSize)
	a.monitor.SetTotal(totalSize)
	return &readCloser{
		Reader: *progress.NewProxyReader(stream, a.monitor),
	}
}
// getterDecompressorNames lists the extension keys of go-getter's decompressor
// registry (e.g. "tar.gz", "zip"); order is not guaranteed (map iteration).
func getterDecompressorNames() []string {
	var names []string
	for decompressorName := range getter.Decompressors {
		names = append(names, decompressorName)
	}
	return names
}

View File

@ -0,0 +1,268 @@
package file
import (
"archive/tar"
"bytes"
"context"
"crypto/x509"
"fmt"
"net"
"net/http"
"net/http/httptest"
"net/url"
"path"
"testing"
"github.com/stretchr/testify/assert"
"github.com/anchore/clio"
)
// TestGetter_GetFile verifies that GetFile succeeds against a TLS server whose CA
// the client trusts, and fails with an unknown-authority error once trust is removed.
func TestGetter_GetFile(t *testing.T) {
	testCases := []struct {
		name          string
		prepareClient func(*http.Client) // optional mutation applied to the client before the request
		assert        assert.ErrorAssertionFunc
	}{
		{
			name:   "client trusts server's CA",
			assert: assert.NoError,
		},
		{
			name:          "client doesn't trust server's CA",
			prepareClient: removeTrustedCAs,
			assert:        assertUnknownAuthorityError,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			requestPath := "/foo"

			server := newTestServer(t, withResponseForPath(t, requestPath, testFileContent))
			t.Cleanup(server.Close)

			httpClient := getClient(t, server)
			if tc.prepareClient != nil {
				tc.prepareClient(httpClient)
			}

			getter := NewGetter(testID, httpClient)
			requestURL := createRequestURL(t, server, requestPath)

			tempDir := t.TempDir()
			tempFile := path.Join(tempDir, "some-destination-file")

			err := getter.GetFile(tempFile, requestURL)
			tc.assert(t, err)
		})
	}
}
// TestGetter_GetToDir_FilterNonArchivesWired verifies that GetToDir rejects
// non-archive http(s) sources (i.e. that validateHTTPSource is wired into GetToDir).
func TestGetter_GetToDir_FilterNonArchivesWired(t *testing.T) {
	testCases := []struct {
		name   string
		source string
		assert assert.ErrorAssertionFunc
	}{
		{
			name:   "error out on non-archive sources",
			source: "http://localhost/something.txt",
			assert: assertErrNonArchiveSource,
		},
	}
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			// nil http.Client: the request never leaves validation, so no client is needed
			test.assert(t, NewGetter(testID, nil).GetToDir(t.TempDir(), test.source))
		})
	}
}
// TestGetter_validateHttpSource covers validateHTTPSource directly: non-archive
// http(s) paths (with or without query params) are rejected, while non-http(s)
// schemes are ignored.
func TestGetter_validateHttpSource(t *testing.T) {
	testCases := []struct {
		name   string
		source string
		assert assert.ErrorAssertionFunc
	}{
		{
			name:   "error out on non-archive sources",
			source: "http://localhost/something.txt",
			assert: assertErrNonArchiveSource,
		},
		{
			name:   "filter out non-archive sources with get param",
			source: "https://localhost/vulnerability-db_v3_2021-11-21T08:15:44Z.txt?checksum=sha256%3Ac402d01fa909a3fa85a5c6733ef27a3a51a9105b6c62b9152adbd24c08358911",
			assert: assertErrNonArchiveSource,
		},
		{
			name:   "ignore non http-https input",
			source: "s3://bucket/something.txt",
			assert: assert.NoError,
		},
	}
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			test.assert(t, validateHTTPSource(test.source))
		})
	}
}
// TestGetter_GetToDir_CertConcerns mirrors TestGetter_GetFile for the directory
// destination path: a trusted CA succeeds, an untrusted CA fails the TLS handshake.
func TestGetter_GetToDir_CertConcerns(t *testing.T) {
	testCases := []struct {
		name          string
		prepareClient func(*http.Client) // optional mutation applied to the client before the request
		assert        assert.ErrorAssertionFunc
	}{
		{
			name:   "client trusts server's CA",
			assert: assert.NoError,
		},
		{
			name:          "client doesn't trust server's CA",
			prepareClient: removeTrustedCAs,
			assert:        assertUnknownAuthorityError,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// GetToDir requires an archive source, so serve a real tarball
			requestPath := "/foo.tar"
			tarball := createTarball("foo", testFileContent)

			server := newTestServer(t, withResponseForPath(t, requestPath, tarball))
			t.Cleanup(server.Close)

			httpClient := getClient(t, server)
			if tc.prepareClient != nil {
				tc.prepareClient(httpClient)
			}

			getter := NewGetter(testID, httpClient)
			requestURL := createRequestURL(t, server, requestPath)

			tempDir := t.TempDir()

			err := getter.GetToDir(tempDir, requestURL)
			tc.assert(t, err)
		})
	}
}
// assertUnknownAuthorityError asserts that err chains to an
// x509.UnknownAuthorityError (TLS handshake failed: untrusted CA).
func assertUnknownAuthorityError(t assert.TestingT, err error, _ ...interface{}) bool {
	return assert.ErrorAs(t, err, &x509.UnknownAuthorityError{})
}
// assertErrNonArchiveSource asserts that err chains to ErrNonArchiveSource.
func assertErrNonArchiveSource(t assert.TestingT, err error, _ ...interface{}) bool {
	return assert.ErrorIs(t, err, ErrNonArchiveSource)
}
// removeTrustedCAs swaps the client's root CA pool for an empty one, so the test
// server's certificate is no longer trusted by this client.
func removeTrustedCAs(client *http.Client) {
	client.Transport.(*http.Transport).TLSClientConfig.RootCAs = x509.NewCertPool()
}
// createTarball makes a single-file tarball and returns it as a byte slice.
func createTarball(filename string, content []byte) []byte {
tarBuffer := new(bytes.Buffer)
tarWriter := tar.NewWriter(tarBuffer)
tarWriter.WriteHeader(&tar.Header{
Name: filename,
Size: int64(len(content)),
Mode: 0600,
})
tarWriter.Write(content)
tarWriter.Close()
return tarBuffer.Bytes()
}
// muxOption mutates a ServeMux to register test routes.
type muxOption func(mux *http.ServeMux)

// withResponseForPath registers a handler that always replies with the given body
// for the given path, logging each request it serves.
func withResponseForPath(t *testing.T, path string, response []byte) muxOption {
	t.Helper()
	return func(mux *http.ServeMux) {
		mux.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
			t.Logf("server handling request: %s %s", req.Method, req.URL)
			_, err := w.Write(response)
			if err != nil {
				// NOTE(review): t.Fatal from the server goroutine is discouraged by the
				// testing docs (must be called from the test goroutine); t.Error would be safer.
				t.Fatal(err)
			}
		})
	}
}
// testID is the application identity used to construct the User-Agent header in tests.
var testID = clio.Identification{
	Name:    "test-app",
	Version: "v0.5.3",
}
// newTestServer starts a TLS test server serving whatever routes the given
// options register; callers are responsible for closing it.
func newTestServer(t *testing.T, muxOptions ...muxOption) *httptest.Server {
	t.Helper()

	mux := http.NewServeMux()
	for _, apply := range muxOptions {
		apply(mux)
	}

	server := httptest.NewTLSServer(mux)
	t.Logf("new TLS server listening at %s", getHost(t, server))
	return server
}
func createRequestURL(t *testing.T, server *httptest.Server, path string) string {
t.Helper()
// TODO: Figure out how to get this value from the server without hardcoding it here
const testServerCertificateName = "example.com"
serverURL, err := url.Parse(server.URL)
if err != nil {
t.Fatal(err)
}
// Set URL hostname to value from TLS certificate
serverURL.Host = fmt.Sprintf("%s:%s", testServerCertificateName, serverURL.Port())
serverURL.Path = path
return serverURL.String()
}
// getClient returns an http.Client that can be used to contact the test TLS server.
// The returned client starts from server.Client() (which trusts the test CA) and has
// its dialer redirected so that any address -- including the certificate's hostname
// used by createRequestURL -- resolves to the test server.
func getClient(t *testing.T, server *httptest.Server) *http.Client {
	t.Helper()

	httpClient := server.Client()
	transport := httpClient.Transport.(*http.Transport)
	serverHost := getHost(t, server)
	transport.DialContext = func(_ context.Context, _, addr string) (net.Conn, error) {
		t.Logf("client dialing %q for host %q", serverHost, addr)
		// Ensure the client dials our test server
		return net.Dial("tcp", serverHost)
	}
	return httpClient
}
// getHost extracts the host value from a server URL string.
// e.g. given a server with URL "http://1.2.3.4:5000/foo", getHost returns "1.2.3.4:5000"
func getHost(t *testing.T, server *httptest.Server) string {
t.Helper()
u, err := url.Parse(server.URL)
if err != nil {
t.Fatal(err)
}
return u.Hostname() + ":" + u.Port()
}
var testFileContent = []byte("This is the content of a test file!\n")

89
internal/file/squashfs.go Normal file
View File

@ -0,0 +1,89 @@
package file
import (
"errors"
"io/fs"
"os"
"path/filepath"
"github.com/diskfs/go-diskfs/filesystem"
)
// WalkDiskDirFunc is the visitor callback for WalkDiskDir, analogous to fs.WalkDirFunc
// but operating on go-diskfs filesystems with os.FileInfo entries.
type WalkDiskDirFunc func(fsys filesystem.FileSystem, path string, d os.FileInfo, err error) error
// WalkDiskDir walks the file tree within the go-diskfs filesystem at root, calling fn
// for each file or directory in the tree. This is meant to mimic the behavior of
// fs.WalkDir in the standard library. Note: unlike fs.WalkDir, fn is invoked only for
// the entries beneath root, not for root itself.
//
// As with fs.WalkDir, fs.SkipDir and fs.SkipAll returned by fn are treated as flow
// control and are never returned to the caller.
func WalkDiskDir(fsys filesystem.FileSystem, root string, fn WalkDiskDirFunc) error {
	infos, err := fsys.ReadDir(root)
	if err != nil {
		return err
	}
	for _, info := range infos {
		walkErr := walkDiskDir(fsys, filepath.Join(root, info.Name()), info, fn)
		switch {
		case walkErr == nil:
			continue
		case errors.Is(walkErr, fs.SkipDir):
			// skip this entry but keep walking its siblings
			continue
		case errors.Is(walkErr, fs.SkipAll):
			// stop the entire walk without error
			return nil
		default:
			return walkErr
		}
	}
	// bug fix: the previous implementation ended with "return err", which leaked
	// fs.SkipDir to the caller whenever the *last* entry's walk returned it (the
	// "continue" above left err set). Per the fs.WalkDir contract, sentinel
	// errors must never escape, so always return nil on a completed walk.
	return nil
}
// walkDiskDir recursively walks the entry at name, mirroring the unexported
// fs.walkDir helper in the standard library: walkDirFn is invoked for the entry
// itself first, and then -- if the entry is a directory -- for each of its children.
func walkDiskDir(fsys filesystem.FileSystem, name string, d os.FileInfo, walkDirFn WalkDiskDirFunc) error {
	if err := walkDirFn(fsys, name, d, nil); err != nil {
		// SkipDir on a directory (or unknown entry) means "don't descend"; consume it here
		if errors.Is(err, fs.SkipDir) && (d == nil || d.IsDir()) {
			return nil
		}
		return err
	}
	isDir := d != nil && d.IsDir()
	if d == nil {
		// no FileInfo available: probe with ReadDir to decide whether this is a directory;
		// a ReadDir failure is treated as "not a directory" and the entry is done
		_, err := fsys.ReadDir(name)
		if err != nil {
			return nil
		}
		isDir = true
	}
	if !isDir {
		// plain files have no children to walk
		return nil
	}
	dirs, err := fsys.ReadDir(name)
	if err != nil {
		// report the read failure to walkDirFn, which may suppress it via SkipDir
		err = walkDirFn(fsys, name, d, err)
		if err != nil {
			if errors.Is(err, fs.SkipDir) {
				return nil
			}
			return err
		}
	}
	for _, d1 := range dirs {
		name1 := filepath.Join(name, d1.Name())
		if err := walkDiskDir(fsys, name1, d1, walkDirFn); err != nil {
			// SkipDir from a child stops walking the remaining siblings of this directory
			if errors.Is(err, fs.SkipDir) {
				break
			}
			// NOTE(review): this SkipAll branch is redundant -- the fallthrough below
			// returns err either way; SkipAll is ultimately handled by the caller.
			if errors.Is(err, fs.SkipAll) {
				return err
			}
			return err
		}
	}
	return nil
}

View File

@ -0,0 +1,250 @@
package file
import (
"io/fs"
"os"
"path/filepath"
"testing"
"github.com/diskfs/go-diskfs/backend/file"
"github.com/diskfs/go-diskfs/filesystem"
"github.com/diskfs/go-diskfs/filesystem/squashfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func createTestFS(t *testing.T) filesystem.FileSystem {
dir := t.TempDir()
filename := "test.squashfs"
f, err := os.Create(filepath.Join(dir, filename))
require.NoError(t, err)
b := file.New(f, false)
fsys, err := squashfs.Create(b, 0, 0, 4096)
require.NoError(t, err)
testFiles := []struct {
path string
content string
isDir bool
}{
{"/file1.txt", "content of file1", false},
{"/file2.txt", "content of file2", false},
{"/dir1", "", true},
{"/dir1/subfile1.txt", "content of subfile1", false},
{"/dir1/subfile2.txt", "content of subfile2", false},
{"/dir1/subdir1", "", true},
{"/dir1/subdir1/deepfile.txt", "deep content", false},
{"/dir2", "", true},
{"/dir2/anotherfile.txt", "another content", false},
{"/emptydir", "", true},
}
for _, tf := range testFiles {
if tf.isDir {
err := fsys.Mkdir(tf.path)
require.NoError(t, err)
} else {
f, err := fsys.OpenFile(tf.path, os.O_CREATE|os.O_RDWR)
require.NoError(t, err)
_, err = f.Write([]byte(tf.content))
require.NoError(t, err)
f.Close()
}
}
return fsys
}
func TestWalkDiskDir_CompleteTraversal(t *testing.T) {
fsys := createTestFS(t)
var visitedPaths []string
err := WalkDiskDir(fsys, "/", func(fsys filesystem.FileSystem, path string, d os.FileInfo, err error) error {
require.NoError(t, err)
visitedPaths = append(visitedPaths, path)
return nil
})
require.NoError(t, err)
expectedPaths := []string{
"/file1.txt",
"/file2.txt",
"/dir1",
"/dir1/subfile1.txt",
"/dir1/subfile2.txt",
"/dir1/subdir1",
"/dir1/subdir1/deepfile.txt",
"/dir2",
"/dir2/anotherfile.txt",
"/emptydir",
}
assert.ElementsMatch(t, expectedPaths, visitedPaths)
}
func TestWalkDiskDir_FileInfoCorrect(t *testing.T) {
fsys := createTestFS(t)
var fileInfos []struct {
path string
isDir bool
name string
}
err := WalkDiskDir(fsys, "/", func(fsys filesystem.FileSystem, path string, d os.FileInfo, err error) error {
require.NoError(t, err)
require.NotNil(t, d)
fileInfos = append(fileInfos, struct {
path string
isDir bool
name string
}{
path: path,
isDir: d.IsDir(),
name: d.Name(),
})
return nil
})
require.NoError(t, err)
for _, fi := range fileInfos {
expectedName := filepath.Base(fi.path)
assert.Equal(t, expectedName, fi.name)
if fi.path == "/dir1" || fi.path == "/dir2" || fi.path == "/emptydir" || fi.path == "/dir1/subdir1" {
assert.True(t, fi.isDir, "Expected %s to be directory", fi.path)
} else {
assert.False(t, fi.isDir, "Expected %s to be file", fi.path)
}
}
}
func TestWalkDiskDir_SkipDir(t *testing.T) {
fsys := createTestFS(t)
var visitedPaths []string
err := WalkDiskDir(fsys, "/", func(fsys filesystem.FileSystem, path string, d os.FileInfo, err error) error {
require.NoError(t, err)
visitedPaths = append(visitedPaths, path)
if path == "/dir1" {
return fs.SkipDir
}
return nil
})
require.NoError(t, err)
assert.Contains(t, visitedPaths, "/dir1")
assert.NotContains(t, visitedPaths, "/dir1/subfile1.txt")
assert.NotContains(t, visitedPaths, "/dir1/subfile2.txt")
assert.NotContains(t, visitedPaths, "/dir1/subdir1")
assert.NotContains(t, visitedPaths, "/dir1/subdir1/deepfile.txt")
assert.Contains(t, visitedPaths, "/dir2")
assert.Contains(t, visitedPaths, "/dir2/anotherfile.txt")
}
func TestWalkDiskDir_SkipAll(t *testing.T) {
fsys := createTestFS(t)
var visitedPaths []string
err := WalkDiskDir(fsys, "/", func(fsys filesystem.FileSystem, path string, d os.FileInfo, err error) error {
require.NoError(t, err)
visitedPaths = append(visitedPaths, path)
if path == "/dir1" {
return fs.SkipAll
}
return nil
})
require.NoError(t, err)
assert.Contains(t, visitedPaths, "/dir1")
assert.NotContains(t, visitedPaths, "/file1.txt")
assert.NotContains(t, visitedPaths, "/file2.txt")
assert.NotContains(t, visitedPaths, "/dir1/subfile1.txt")
assert.NotContains(t, visitedPaths, "/dir2")
assert.NotContains(t, visitedPaths, "/dir2/anotherfile.txt")
assert.NotContains(t, visitedPaths, "/emptydir")
}
func TestWalkDiskDir_EmptyDirectory(t *testing.T) {
fs := createTestFS(t)
var visitedPaths []string
err := WalkDiskDir(fs, "/emptydir", func(fsys filesystem.FileSystem, path string, d os.FileInfo, err error) error {
require.NoError(t, err)
visitedPaths = append(visitedPaths, path)
return nil
})
require.NoError(t, err)
assert.Empty(t, visitedPaths)
}
func TestWalkDiskDir_NonexistentPath(t *testing.T) {
fs := createTestFS(t)
err := WalkDiskDir(fs, "/nonexistent", func(fsys filesystem.FileSystem, path string, d os.FileInfo, err error) error {
return nil
})
assert.Error(t, err)
}
func TestWalkDiskDir_WalkFunctionError(t *testing.T) {
fs := createTestFS(t)
customErr := assert.AnError
err := WalkDiskDir(fs, "/", func(fsys filesystem.FileSystem, path string, d os.FileInfo, err error) error {
if path == "/file1.txt" {
return customErr
}
return nil
})
assert.Error(t, err)
assert.Equal(t, customErr, err)
}
func TestWalkDiskDir_SubdirectoryTraversal(t *testing.T) {
fs := createTestFS(t)
var visitedPaths []string
err := WalkDiskDir(fs, "/dir1", func(fsys filesystem.FileSystem, path string, d os.FileInfo, err error) error {
require.NoError(t, err)
visitedPaths = append(visitedPaths, path)
return nil
})
require.NoError(t, err)
expectedPaths := []string{
"/dir1/subfile1.txt",
"/dir1/subfile2.txt",
"/dir1/subdir1",
"/dir1/subdir1/deepfile.txt",
}
assert.ElementsMatch(t, expectedPaths, visitedPaths)
}
// TestWalkDiskDir_SingleFile verifies that passing a plain file (not a
// directory) as the walk root is rejected and visits nothing.
func TestWalkDiskDir_SingleFile(t *testing.T) {
	fs := createTestFS(t)

	var seen []string
	err := WalkDiskDir(fs, "/file1.txt", func(fsys filesystem.FileSystem, path string, d os.FileInfo, err error) error {
		require.NoError(t, err)
		seen = append(seen, path)
		return nil
	})

	// we are providing a file path, not a directory
	require.Error(t, err)
	assert.Empty(t, seen)
}

View File

@ -13,6 +13,17 @@ func HasAnyOfPrefixes(input string, prefixes ...string) bool {
return false
}
// HasAnyOfSuffixes returns an indication if the given string has any of the given suffixes.
func HasAnyOfSuffixes(input string, suffixes ...string) bool {
	for i := range suffixes {
		if strings.HasSuffix(input, suffixes[i]) {
			return true
		}
	}
	return false
}
func TruncateMiddleEllipsis(input string, maxLen int) string {
if len(input) <= maxLen {
return input

View File

@ -7,6 +7,63 @@ import (
"github.com/stretchr/testify/assert"
)
// TestHasAnyOfSuffixes exercises suffix matching across the interesting
// boundary cases: a hit, a miss, no candidates, a late hit, and empty input.
func TestHasAnyOfSuffixes(t *testing.T) {
	cases := []struct {
		name     string
		input    string
		suffixes []string
		want     bool
	}{
		{
			name:  "go case",
			input: "this has something",
			suffixes: []string{
				"has something",
				"has NOT something",
			},
			want: true,
		},
		{
			name:  "no match",
			input: "this has something",
			suffixes: []string{
				"has NOT something",
			},
			want: false,
		},
		{
			name:     "empty",
			input:    "this has something",
			suffixes: []string{},
			want:     false,
		},
		{
			name:  "positive match last",
			input: "this has something",
			suffixes: []string{
				"that does not have",
				"something",
			},
			want: true,
		},
		{
			name:  "empty input",
			input: "",
			suffixes: []string{
				"that does not have",
				"this has",
			},
			want: false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.want, HasAnyOfSuffixes(tc.input, tc.suffixes...))
		})
	}
}
func TestHasAnyOfPrefixes(t *testing.T) {
tests := []struct {
name string

View File

@ -15,7 +15,7 @@ import (
"github.com/anchore/syft/syft/sbom"
)
func RunTask(ctx context.Context, tsk Task, resolver file.Resolver, s sbomsync.Builder, prog *monitor.CatalogerTaskProgress) error {
func RunTask(ctx context.Context, tsk Task, resolver file.Resolver, s sbomsync.Builder, prog *monitor.TaskProgress) error {
err := runTaskSafely(ctx, tsk, resolver, s)
unknowns, remainingErrors := unknown.ExtractCoordinateErrors(err)
if len(unknowns) > 0 {

View File

@ -17,7 +17,7 @@ func Test_TaskExecutor_PanicHandling(t *testing.T) {
panic("something bad happened")
})
err := RunTask(context.Background(), tsk, nil, nil, &monitor.CatalogerTaskProgress{
err := RunTask(context.Background(), tsk, nil, nil, &monitor.TaskProgress{
Manual: progress.NewManual(-1),
})

View File

@ -144,14 +144,14 @@ func setContextExecutors(ctx context.Context, cfg *CreateSBOMConfig) context.Con
return ctx
}
func monitorPackageCount(prog *monitor.CatalogerTaskProgress) func(s *sbom.SBOM) {
func monitorPackageCount(prog *monitor.TaskProgress) func(s *sbom.SBOM) {
return func(s *sbom.SBOM) {
count := humanize.Comma(int64(s.Artifacts.Packages.PackageCount()))
prog.AtomicStage.Set(fmt.Sprintf("%s packages", count))
}
}
func monitorPackageCatalogingTask() *monitor.CatalogerTaskProgress {
func monitorPackageCatalogingTask() *monitor.TaskProgress {
info := monitor.GenericTask{
Title: monitor.Title{
Default: "Packages",
@ -164,7 +164,7 @@ func monitorPackageCatalogingTask() *monitor.CatalogerTaskProgress {
return bus.StartCatalogerTask(info, -1, "")
}
func monitorCatalogingTask(srcID artifact.ID, tasks [][]task.Task) *monitor.CatalogerTaskProgress {
func monitorCatalogingTask(srcID artifact.ID, tasks [][]task.Task) *monitor.TaskProgress {
info := monitor.GenericTask{
Title: monitor.Title{
Default: "Catalog contents",

View File

@ -464,6 +464,8 @@ func findDefaultTags(src source.Description) ([]string, error) {
return []string{pkgcataloging.ImageTag, filecataloging.FileTag}, nil
case source.FileMetadata, source.DirectoryMetadata:
return []string{pkgcataloging.DirectoryTag, filecataloging.FileTag}, nil
case source.SnapMetadata:
return []string{pkgcataloging.InstalledTag, filecataloging.FileTag}, nil
default:
return nil, fmt.Errorf("unable to determine default cataloger tag for source type=%T", m)
}

View File

@ -23,6 +23,10 @@ const (
// CatalogerTaskStarted is a partybus event that occurs when starting a task within a cataloger
CatalogerTaskStarted partybus.EventType = typePrefix + "-cataloger-task-started"
// PullSourceStarted is a partybus event that occurs when starting to pull a source (does not overlap with stereoscope image pull events,
// this covers any additional sources such as snap and git repos).
PullSourceStarted partybus.EventType = typePrefix + "-pull-source-started"
// Events exclusively for the CLI
// CLIAppUpdateAvailable is a partybus event that occurs when an application update is available

View File

@ -1,15 +0,0 @@
package monitor
import (
"github.com/wagoodman/go-progress"
)
const (
TopLevelCatalogingTaskID = "cataloging"
PackageCatalogingTaskID = "package-cataloging"
)
type CatalogerTaskProgress struct {
*progress.AtomicStage
*progress.Manual
}

View File

@ -6,6 +6,11 @@ import (
"github.com/wagoodman/go-progress"
)
const (
TopLevelCatalogingTaskID = "cataloging"
PackageCatalogingTaskID = "package-cataloging"
)
type ShellProgress struct {
io.Reader
progress.Progressable
@ -34,3 +39,8 @@ type GenericTask struct {
ParentID string
Context string
}
type TaskProgress struct {
*progress.AtomicStage
*progress.Manual
}

View File

@ -77,6 +77,26 @@ func ParseCatalogerTaskStarted(e partybus.Event) (progress.StagedProgressable, *
return mon, &source, nil
}
// ParsePullSourceStarted extracts the progress monitor and task description
// from a PullSourceStarted event. It returns an error if the event is of the
// wrong type or the event source is not a monitor.GenericTask. The returned
// StagedProgressable may be nil when the event carries no progress value.
func ParsePullSourceStarted(e partybus.Event) (progress.StagedProgressable, *monitor.GenericTask, error) {
	if err := checkEventType(e.Type, event.PullSourceStarted); err != nil {
		return nil, nil, err
	}

	source, ok := e.Source.(monitor.GenericTask)
	if !ok {
		return nil, nil, newPayloadErr(e.Type, "Source", e.Source)
	}

	// the progress value is optional: a failed type assertion already yields
	// a nil interface, so no explicit nil-reset is needed
	mon, _ := e.Value.(progress.StagedProgressable)

	return mon, &source, nil
}
func ParseAttestationStartedEvent(e partybus.Event) (io.Reader, progress.Progressable, *monitor.GenericTask, error) {
if err := checkEventType(e.Type, event.AttestationStarted); err != nil {
return nil, nil, nil, err

View File

@ -106,7 +106,7 @@ func processExecutableLocation(loc file.Location, resolver file.Resolver) (*file
return processExecutable(loc, uReader)
}
func catalogingProgress(locations int64) *monitor.CatalogerTaskProgress {
func catalogingProgress(locations int64) *monitor.TaskProgress {
info := monitor.GenericTask{
Title: monitor.Title{
Default: "Executables",

View File

@ -113,7 +113,7 @@ func (i *Cataloger) catalogLocation(resolver file.Resolver, location file.Locati
return buf.String(), nil
}
func catalogingProgress(locations int64) *monitor.CatalogerTaskProgress {
func catalogingProgress(locations int64) *monitor.TaskProgress {
info := monitor.GenericTask{
Title: monitor.Title{
Default: "File contents",

View File

@ -112,7 +112,7 @@ func (i *Cataloger) catalogLocation(ctx context.Context, resolver file.Resolver,
return digests, nil
}
func catalogingProgress(locations int64) *monitor.CatalogerTaskProgress {
func catalogingProgress(locations int64) *monitor.TaskProgress {
info := monitor.GenericTask{
Title: monitor.Title{
Default: "File digests",

View File

@ -76,7 +76,7 @@ func (i *Cataloger) Catalog(ctx context.Context, resolver file.Resolver, coordin
return results, errs
}
func catalogingProgress(locations int64) *monitor.CatalogerTaskProgress {
func catalogingProgress(locations int64) *monitor.TaskProgress {
info := monitor.GenericTask{
Title: monitor.Title{
Default: "File metadata",

View File

@ -37,6 +37,7 @@ const (
prefixImage = "Image"
prefixDirectory = "Directory"
prefixFile = "File"
prefixSnap = "Snap"
prefixUnknown = "Unknown"
)
@ -228,6 +229,18 @@ func toRootPackage(s source.Description) *spdx.Package {
Value: d.Value,
})
}
case source.SnapMetadata:
prefix = prefixSnap
purpose = spdxPrimaryPurposeContainer
for _, d := range m.Digests {
checksums = append(checksums, spdx.Checksum{
Algorithm: toChecksumAlgorithm(d.Algorithm),
Value: d.Value,
})
}
default:
prefix = prefixUnknown
purpose = spdxPrimaryPurposeOther

View File

@ -237,6 +237,82 @@ func Test_toFormatModel(t *testing.T) {
},
},
},
{
name: "snap",
in: sbom.SBOM{
Source: source.Description{
Name: "etcd",
Version: "3.4.36",
Metadata: source.SnapMetadata{
Summary: "Distributed reliable key-value store",
Base: "core18",
Grade: "stable",
Confinement: "strict",
Architectures: []string{
"amd64",
},
Digests: []file.Digest{
{
Algorithm: "sha256",
Value: "d34db33f",
},
},
},
},
Artifacts: sbom.Artifacts{
Packages: pkg.NewCollection(pkg.Package{
Name: "pkg-1",
Version: "version-1",
}),
},
},
expected: &spdx.Document{
SPDXIdentifier: "DOCUMENT",
SPDXVersion: spdx.Version,
DataLicense: spdx.DataLicense,
DocumentName: "etcd",
Packages: []*spdx.Package{
{
PackageSPDXIdentifier: "Package-pkg-1-pkg-1",
PackageName: "pkg-1",
PackageVersion: "version-1",
PackageSupplier: &spdx.Supplier{
Supplier: "NOASSERTION",
},
},
{
PackageSPDXIdentifier: "DocumentRoot-Snap-etcd",
PackageName: "etcd",
PackageVersion: "3.4.36",
PrimaryPackagePurpose: "CONTAINER",
PackageChecksums: []spdx.Checksum{{Algorithm: "SHA256", Value: "d34db33f"}},
PackageSupplier: &spdx.Supplier{
Supplier: "NOASSERTION",
},
},
},
Relationships: []*spdx.Relationship{
{
RefA: spdx.DocElementID{
ElementRefID: "DocumentRoot-Snap-etcd",
},
RefB: spdx.DocElementID{
ElementRefID: "Package-pkg-1-pkg-1",
},
Relationship: spdx.RelationshipContains,
},
{
RefA: spdx.DocElementID{
ElementRefID: "DOCUMENT",
},
RefB: spdx.DocElementID{
ElementRefID: "DocumentRoot-Snap-etcd",
},
Relationship: spdx.RelationshipDescribes,
},
},
},
},
}
for _, test := range tests {

View File

@ -130,6 +130,11 @@ func toPath(s source.Description, p pkg.Package) string {
return fmt.Sprintf("%s/%s", path, packagePath)
}
return packagePath
case source.SnapMetadata:
if inputPath != "" {
return fmt.Sprintf("%s:/%s", inputPath, packagePath)
}
return packagePath
}
}
return inputPath

View File

@ -173,6 +173,11 @@ func Test_toGithubModel(t *testing.T) {
metadata: source.FileMetadata{Path: "./archive.tar.gz"},
testPath: "archive.tar.gz:/etc",
},
{
name: "snap",
metadata: source.SnapMetadata{},
testPath: "name:/etc",
},
}
for _, test := range tests {
@ -180,6 +185,7 @@ func Test_toGithubModel(t *testing.T) {
s := sbomFixture()
if test.metadata != nil {
s.Source.Name = "name"
s.Source.Metadata = test.metadata
}
actual := ToGithubModel(&s)

View File

@ -45,6 +45,15 @@ func Test_DocumentName(t *testing.T) {
},
expected: "some/path/to/place",
},
{
name: "snap",
srcMetadata: source.Description{
Name: "some/name",
// there is nothing in the snap metadata that indicates a name
Metadata: source.SnapMetadata{},
},
expected: "some/name",
},
{
name: "named",
srcMetadata: source.Description{

View File

@ -16,6 +16,7 @@ const (
InputImage = "image"
InputDirectory = "dir"
InputFile = "file"
InputSnap = "snap"
)
func DocumentNameAndNamespace(src source.Description, desc sbom.Descriptor) (string, string) {
@ -33,6 +34,8 @@ func DocumentNamespace(name string, src source.Description, desc sbom.Descriptor
input = InputDirectory
case source.FileMetadata:
input = InputFile
case source.SnapMetadata:
input = InputSnap
}
uniqueID := uuid.Must(uuid.NewRandom())

View File

@ -12,7 +12,7 @@ import (
"github.com/anchore/syft/syft/source"
)
func Test_documentNamespace(t *testing.T) {
func Test_DocumentNamespace(t *testing.T) {
tracker := sourcemetadata.NewCompletionTester(t)
tests := []struct {
@ -53,6 +53,14 @@ func Test_documentNamespace(t *testing.T) {
},
expected: "https://anchore.com/syft/file/my-name-",
},
{
name: "snap",
inputName: "my-name",
src: source.Description{
Metadata: source.SnapMetadata{},
},
expected: "https://anchore.com/syft/snap/my-name-",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {

View File

@ -145,6 +145,51 @@ func TestSource_UnmarshalJSON(t *testing.T) {
},
},
},
{
name: "snap",
input: []byte(`{
"id": "foobar",
"name": "etcd",
"version": "3.4.36",
"type": "snap",
"metadata": {
"summary": "Distributed reliable key-value store",
"base": "core18",
"grade": "stable",
"confinement": "strict",
"architectures": [
"amd64"
],
"digests": [
{
"algorithm": "sha256",
"value": "6700d789d2c38b0f7513058ddcea8f9a275e2206b4621a772eb065e12069956e"
}
]
}
}`),
expected: &Source{
ID: "foobar",
Name: "etcd",
Version: "3.4.36",
Type: "snap",
Metadata: source.SnapMetadata{
Summary: "Distributed reliable key-value store",
Base: "core18",
Grade: "stable",
Confinement: "strict",
Architectures: []string{
"amd64",
},
Digests: []file.Digest{
{
Algorithm: "sha256",
Value: "6700d789d2c38b0f7513058ddcea8f9a275e2206b4621a772eb065e12069956e",
},
},
},
},
},
{
name: "unknown source type",
input: []byte(`{

View File

@ -127,6 +127,36 @@ func Test_toSourceModel(t *testing.T) {
},
},
},
{
name: "snap",
src: source.Description{
ID: "test-id",
Name: "some-name",
Version: "some-version",
Metadata: source.SnapMetadata{
Summary: "some summary",
Base: "some/base",
Grade: "some grade",
Confinement: "some confinement",
Architectures: []string{"x86_64", "arm64"},
Digests: []file.Digest{{Algorithm: "sha256", Value: "some-digest"}},
},
},
expected: model.Source{
ID: "test-id",
Name: "some-name",
Version: "some-version",
Type: "snap",
Metadata: source.SnapMetadata{
Summary: "some summary",
Base: "some/base",
Grade: "some grade",
Confinement: "some confinement",
Architectures: []string{"x86_64", "arm64"},
Digests: []file.Digest{{Algorithm: "sha256", Value: "some-digest"}},
},
},
},
// below are regression tests for when the name/version are not provided
// historically we've hoisted up the name/version from the metadata, now it is a simple pass-through
{

View File

@ -100,6 +100,36 @@ func Test_toSyftSourceData(t *testing.T) {
},
},
},
{
name: "snap",
src: model.Source{
ID: "the-id",
Name: "some-name",
Version: "some-version",
Type: "snap",
Metadata: source.SnapMetadata{
Summary: "something!",
Base: "base!",
Grade: "grade!",
Confinement: "confined!",
Architectures: []string{"arch!"},
Digests: []file.Digest{{Algorithm: "sha256", Value: "some-digest!"}},
},
},
expected: &source.Description{
ID: "the-id",
Name: "some-name",
Version: "some-version",
Metadata: source.SnapMetadata{
Summary: "something!",
Base: "base!",
Grade: "grade!",
Confinement: "confined!",
Architectures: []string{"arch!"},
Digests: []file.Digest{{Algorithm: "sha256", Value: "some-digest!"}},
},
},
},
// below are regression tests for when the name/version are not provided
// historically we've hoisted up the name/version from the metadata, now it is a simple pass-through
{

View File

@ -34,26 +34,44 @@ func GetSource(ctx context.Context, userInput string, cfg *GetSourceConfig) (sou
errs = append(errs, fmt.Errorf("%s: %w", p.Name(), err))
}
}
if err := validateSourcePlatform(src, cfg); err != nil {
return nil, err
}
if src != nil {
// if we have a non-image type and platform is specified, it's an error
if cfg.SourceProviderConfig.Platform != nil {
meta := src.Describe().Metadata
switch meta.(type) {
case *source.ImageMetadata, source.ImageMetadata:
default:
return src, fmt.Errorf("platform specified with non-image source")
}
}
return src, nil
}
}
if len(errs) == 0 {
return nil, fmt.Errorf("no source providers were able to resolve the input %q", userInput)
}
if len(fileNotFoundProviders) > 0 {
errs = append(errs, fmt.Errorf("additionally, the following providers failed with %w: %s", os.ErrNotExist, strings.Join(fileNotFoundProviders, ", ")))
}
return nil, sourceError(userInput, errs...)
}
// validateSourcePlatform ensures that a platform selection is only honored for
// source types that can express one (container images and snaps). A nil
// source or an absent platform request is always valid.
func validateSourcePlatform(src source.Source, cfg *GetSourceConfig) error {
	// nothing to validate without both a resolved source and a requested platform
	if src == nil || cfg == nil || cfg.SourceProviderConfig == nil || cfg.SourceProviderConfig.Platform == nil {
		return nil
	}

	switch src.Describe().Metadata.(type) {
	case *source.ImageMetadata, source.ImageMetadata,
		*source.SnapMetadata, source.SnapMetadata:
		return nil
	default:
		return fmt.Errorf("platform is not supported for this source type")
	}
}
func sourceError(userInput string, errs ...error) error {
switch len(errs) {
case 0:

208
syft/get_source_test.go Normal file
View File

@ -0,0 +1,208 @@
package syft
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/anchore/stereoscope/pkg/image"
"github.com/anchore/syft/syft/internal/sourcemetadata"
"github.com/anchore/syft/syft/source"
"github.com/anchore/syft/syft/source/sourceproviders"
)
// mockSource is a test double that embeds source.Source (leaving all other
// methods unimplemented) and overrides only Describe to return a canned
// description.
type mockSource struct {
	source.Source
	desc source.Description // the description handed back by Describe
}

// Describe returns the fixed description configured on the mock.
func (s mockSource) Describe() source.Description {
	return s.desc
}
// TestValidateSourcePlatform_NilSource ensures that a nil source is always
// accepted, even when a platform has been explicitly requested.
func TestValidateSourcePlatform_NilSource(t *testing.T) {
	cfg := &GetSourceConfig{
		SourceProviderConfig: &sourceproviders.Config{
			Platform: &image.Platform{
				Architecture: "amd64",
				OS:           "linux",
			},
		},
	}

	// use testify (consistent with the rest of this file) instead of a
	// manual if/t.Errorf check
	require.NoError(t, validateSourcePlatform(nil, cfg))
}
// TestValidateSourcePlatform_NilPlatformConfig ensures that any shape of
// "no platform requested" configuration (nil config, nil provider config, or
// nil platform) passes validation regardless of the source metadata.
func TestValidateSourcePlatform_NilPlatformConfig(t *testing.T) {
	tests := []struct {
		name string
		cfg  *GetSourceConfig
	}{
		{
			name: "nil config",
			cfg:  nil,
		},
		{
			name: "nil SourceProviderConfig",
			cfg: &GetSourceConfig{
				SourceProviderConfig: nil,
			},
		},
		{
			name: "nil Platform",
			cfg: &GetSourceConfig{
				SourceProviderConfig: &sourceproviders.Config{
					Platform: nil,
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			src := mockSource{
				desc: source.Description{
					Metadata: &source.ImageMetadata{},
				},
			}

			// use testify (consistent with the rest of this file) instead of
			// a manual if/t.Errorf check
			require.NoError(t, validateSourcePlatform(src, tt.cfg))
		})
	}
}
// TestValidateSourcePlatform_SupportedMetadataTypes table-tests every known
// source metadata type against a requested platform: image and snap sources
// accept a platform, directory and file sources reject one. The completion
// tracker asserts every registered metadata type is covered here.
func TestValidateSourcePlatform_SupportedMetadataTypes(t *testing.T) {
	tracker := sourcemetadata.NewCompletionTester(t)

	cfg := &GetSourceConfig{
		SourceProviderConfig: &sourceproviders.Config{
			Platform: &image.Platform{
				Architecture: "amd64",
				OS:           "linux",
			},
		},
	}

	cases := []struct {
		name     string
		metadata any
		wantErr  require.ErrorAssertionFunc
	}{
		{
			name:     "image",
			metadata: source.ImageMetadata{},
		},
		{
			name:     "snap",
			metadata: source.SnapMetadata{},
		},
		{
			name:     "dir",
			metadata: source.DirectoryMetadata{},
			wantErr:  require.Error,
		},
		{
			name:     "file",
			metadata: source.FileMetadata{},
			wantErr:  require.Error,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// default to expecting success unless the case says otherwise
			wantErr := tc.wantErr
			if wantErr == nil {
				wantErr = require.NoError
			}

			tracker.Tested(t, tc.metadata)

			src := mockSource{
				desc: source.Description{
					Metadata: tc.metadata,
				},
			}

			err := validateSourcePlatform(src, cfg)
			wantErr(t, err, "Expected no error for %s, got: %v", tc.name, err)
		})
	}
}
// TestValidateSourcePlatform_UnsupportedMetadataTypes ensures that requesting
// a platform against any non-image, non-snap metadata value is rejected with
// a stable error message.
func TestValidateSourcePlatform_UnsupportedMetadataTypes(t *testing.T) {
	cfg := &GetSourceConfig{
		SourceProviderConfig: &sourceproviders.Config{
			Platform: &image.Platform{
				Architecture: "amd64",
				OS:           "linux",
			},
		},
	}

	tests := []struct {
		name     string
		metadata interface{}
	}{
		{
			name:     "string metadata",
			metadata: "unsupported",
		},
		{
			name:     "int metadata",
			metadata: 42,
		},
		{
			name:     "nil metadata",
			metadata: nil,
		},
		{
			name:     "custom struct",
			metadata: struct{ Name string }{Name: "test"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			src := mockSource{
				desc: source.Description{
					Metadata: tt.metadata,
				},
			}

			err := validateSourcePlatform(src, cfg)

			// bug fix: the original checked err == nil with t.Errorf (which
			// does not stop the test) and then unconditionally called
			// err.Error(), panicking on a nil error. require.EqualError
			// asserts both non-nil-ness and the exact message, and halts on
			// failure.
			require.EqualError(t, err, "platform is not supported for this source type")
		})
	}
}
// TestValidateSourcePlatform_ValidCombination ensures the happy path: an
// image source combined with a requested platform passes validation.
func TestValidateSourcePlatform_ValidCombination(t *testing.T) {
	cfg := &GetSourceConfig{
		SourceProviderConfig: &sourceproviders.Config{
			Platform: &image.Platform{
				Architecture: "amd64",
				OS:           "linux",
			},
		},
	}

	src := mockSource{
		desc: source.Description{
			Metadata: &source.ImageMetadata{},
		},
	}

	// use testify (consistent with the rest of this file) instead of a
	// manual if/t.Errorf check
	require.NoError(t, validateSourcePlatform(src, cfg))
}

View File

@ -14,7 +14,7 @@ var _ file.Resolver = (*Directory)(nil)
// Directory implements path and content access for the directory data source.
type Directory struct {
filetreeResolver
FiletreeResolver
path string
indexer *directoryIndexer
}
@ -39,10 +39,11 @@ func newFromDirectoryWithoutIndex(root string, base string, pathFilters ...PathI
return &Directory{
path: cleanRoot,
filetreeResolver: filetreeResolver{
chroot: *chroot,
tree: filetree.New(),
index: filetree.NewIndex(),
FiletreeResolver: FiletreeResolver{
Chroot: *chroot,
Tree: filetree.New(),
Index: filetree.NewIndex(),
Opener: nativeOSFileOpener,
},
indexer: newDirectoryIndexer(cleanRoot, cleanBase, pathFilters...),
}, nil
@ -57,9 +58,9 @@ func (r *Directory) buildIndex() error {
return err
}
r.tree = tree
r.index = index
r.searchContext = filetree.NewSearchContext(tree, index)
r.Tree = tree
r.Index = index
r.SearchContext = filetree.NewSearchContext(tree, index)
return nil
}

View File

@ -9,14 +9,12 @@ import (
"path/filepath"
"strings"
"github.com/wagoodman/go-partybus"
"github.com/wagoodman/go-progress"
"github.com/anchore/stereoscope/pkg/file"
"github.com/anchore/stereoscope/pkg/filetree"
"github.com/anchore/syft/internal/bus"
"github.com/anchore/syft/internal/log"
"github.com/anchore/syft/syft/event"
"github.com/anchore/syft/syft/internal/windows"
)
@ -64,14 +62,14 @@ func (r *directoryIndexer) build() (filetree.Reader, filetree.IndexReader, error
return r.tree, r.index, indexAllRoots(r.path, r.indexTree)
}
func indexAllRoots(root string, indexer func(string, *progress.Stage) ([]string, error)) error {
func indexAllRoots(root string, indexer func(string, *progress.AtomicStage) ([]string, error)) error {
// why account for multiple roots? To cover cases when there is a symlink that references above the root path,
// in which case we need to additionally index where the link resolves to. it's for this reason why the filetree
// must be relative to the root of the filesystem (and not just relative to the given path).
pathsToIndex := []string{root}
fullPathsMap := map[string]struct{}{}
stager, prog := indexingProgress(root)
prog := bus.StartIndexingFiles(root)
defer prog.SetCompleted()
loop:
for {
@ -85,7 +83,7 @@ loop:
currentPath, pathsToIndex = pathsToIndex[0], pathsToIndex[1:]
}
additionalRoots, err := indexer(currentPath, stager)
additionalRoots, err := indexer(currentPath, prog.AtomicStage)
if err != nil {
return fmt.Errorf("unable to index filesystem path=%q: %w", currentPath, err)
}
@ -101,7 +99,7 @@ loop:
return nil
}
func (r *directoryIndexer) indexTree(root string, stager *progress.Stage) ([]string, error) {
func (r *directoryIndexer) indexTree(root string, stager *progress.AtomicStage) ([]string, error) {
log.WithFields("path", root).Trace("indexing filetree")
var roots []string
@ -144,7 +142,7 @@ func (r *directoryIndexer) indexTree(root string, stager *progress.Stage) ([]str
err = filepath.Walk(root,
func(path string, info os.FileInfo, err error) error {
stager.Current = path
stager.Set(path)
newRoot, err := r.indexPath(path, info, err)
@ -179,7 +177,7 @@ func isRealPath(root string) (bool, error) {
return rootParent == realRootParent, nil
}
func (r *directoryIndexer) indexBranch(root string, stager *progress.Stage) ([]string, error) {
func (r *directoryIndexer) indexBranch(root string, stager *progress.AtomicStage) ([]string, error) {
rootRealPath, err := filepath.EvalSymlinks(root)
if err != nil {
var pathErr *os.PathError
@ -213,7 +211,7 @@ func (r *directoryIndexer) indexBranch(root string, stager *progress.Stage) ([]s
targetPath = p
}
stager.Current = targetPath
stager.Set(targetPath)
lstat, err := os.Lstat(targetPath)
newRoot, err := r.indexPath(targetPath, lstat, err)
@ -492,22 +490,3 @@ func requireFileInfo(_, _ string, info os.FileInfo, _ error) error {
}
return nil
}
func indexingProgress(path string) (*progress.Stage, *progress.Manual) {
stage := &progress.Stage{}
prog := progress.NewManual(-1)
bus.Publish(partybus.Event{
Type: event.FileIndexingStarted,
Source: path,
Value: struct {
progress.Stager
progress.Progressable
}{
Stager: progress.Stager(stage),
Progressable: prog,
},
})
return stage, prog
}

View File

@ -25,7 +25,7 @@ type indexerMock struct {
additionalRoots map[string][]string
}
func (m *indexerMock) indexer(s string, _ *progress.Stage) ([]string, error) {
func (m *indexerMock) indexer(s string, _ *progress.AtomicStage) ([]string, error) {
m.observedRoots = append(m.observedRoots, s)
return m.additionalRoots[s], nil
}

View File

@ -12,7 +12,7 @@ var _ file.Resolver = (*File)(nil)
// File implements path and content access for the file data source.
type File struct {
filetreeResolver
FiletreeResolver
path string
indexer *fileIndexer
}
@ -29,10 +29,11 @@ func NewFromFile(parent, path string, pathFilters ...PathIndexVisitor) (*File, e
file := &File{
path: path,
filetreeResolver: filetreeResolver{
chroot: *chroot,
tree: filetree.New(),
index: filetree.NewIndex(),
FiletreeResolver: FiletreeResolver{
Chroot: *chroot,
Tree: filetree.New(),
Index: filetree.NewIndex(),
Opener: nativeOSFileOpener,
},
indexer: newFileIndexer(path, cleanBase, pathFilters...),
}
@ -49,9 +50,9 @@ func (r *File) buildIndex() error {
return err
}
r.tree = tree
r.index = index
r.searchContext = filetree.NewSearchContext(tree, index)
r.Tree = tree
r.Index = index
r.SearchContext = filetree.NewSearchContext(tree, index)
return nil
}

View File

@ -9,6 +9,7 @@ import (
"github.com/anchore/stereoscope/pkg/file"
"github.com/anchore/stereoscope/pkg/filetree"
"github.com/anchore/syft/internal/bus"
"github.com/anchore/syft/internal/log"
"github.com/anchore/syft/syft/internal/windows"
)
@ -49,14 +50,14 @@ func (r *fileIndexer) build() (filetree.Reader, filetree.IndexReader, error) {
// Index file at the given path
// A file indexer simply indexes the file and its directory.
func index(path string, indexer func(string, *progress.Stage) error) error {
func index(path string, indexer func(string, *progress.AtomicStage) error) error {
// We want to index the file at the provided path and its parent directory.
// We need to probably check that we have file access
// We also need to determine what to do when the file itself is a symlink.
stager, prog := indexingProgress(path)
prog := bus.StartIndexingFiles(path)
defer prog.SetCompleted()
err := indexer(path, stager)
err := indexer(path, prog.AtomicStage)
if err != nil {
return fmt.Errorf("unable to index filesystem path=%q: %w", path, err)
}
@ -70,7 +71,7 @@ func index(path string, indexer func(string, *progress.Stage) error) error {
// permissions errors on the file at path or its parent directory will return an error.
// Filter functions provided to the indexer are honoured, so if the path provided (or its parent
// directory) is filtered by a filter function, an error is returned.
func (r *fileIndexer) indexPath(path string, stager *progress.Stage) error {
func (r *fileIndexer) indexPath(path string, stager *progress.AtomicStage) error {
log.WithFields("path", path).Trace("indexing file path")
absPath, err := filepath.Abs(path)
@ -105,14 +106,14 @@ func (r *fileIndexer) indexPath(path string, stager *progress.Stage) error {
return fmt.Errorf("unable to stat parent of file=%q: %w", absSymlinkFreeParent, err)
}
stager.Current = absSymlinkFreeParent
stager.Set(absSymlinkFreeParent)
indexParentErr := r.filterAndIndex(absSymlinkFreeParent, parentFi)
if indexParentErr != nil {
return indexParentErr
}
// We have indexed the parent successfully, now attempt to index the file.
stager.Current = absSymlinkFreeFilePath
stager.Set(absSymlinkFreeFilePath)
indexFileErr := r.filterAndIndex(absSymlinkFreeFilePath, fi)
if indexFileErr != nil {
return indexFileErr

View File

@ -14,33 +14,46 @@ import (
"github.com/anchore/syft/syft/internal/windows"
)
type filetreeResolver struct {
chroot ChrootContext
tree filetree.Reader
index filetree.IndexReader
searchContext filetree.Searcher
// TODO: consider making a constructor for this
type FiletreeResolver struct {
Chroot ChrootContext
Tree filetree.Reader
Index filetree.IndexReader
SearchContext filetree.Searcher
Opener func(stereoscopeFile.Reference) (io.ReadCloser, error)
}
func (r *filetreeResolver) requestPath(userPath string) (string, error) {
return r.chroot.ToNativePath(userPath)
// nativeOSFileOpener opens the file behind a reference directly from the host
// OS filesystem, returning a lazy reader (no file handle is held until read).
func nativeOSFileOpener(ref stereoscopeFile.Reference) (io.ReadCloser, error) {
	// reference paths are stored in posix form; translate to the true
	// on-disk representation when running on windows
	p := string(ref.RealPath)
	if windows.HostRunningOnWindows() {
		p = windows.FromPosix(p)
	}
	return stereoscopeFile.NewLazyReadCloser(p), nil
}
// requestPath translates a user-facing (chroot-relative) path into the
// corresponding native path within the underlying filesystem.
func (r *FiletreeResolver) requestPath(userPath string) (string, error) {
	return r.Chroot.ToNativePath(userPath)
}
// responsePath takes a path from the underlying fs domain and converts it to a path that is relative to the root of the file resolver.
func (r filetreeResolver) responsePath(path string) string {
return r.chroot.ToChrootPath(path)
func (r FiletreeResolver) responsePath(path string) string {
return r.Chroot.ToChrootPath(path)
}
// HasPath indicates if the given path exists in the underlying source.
func (r *filetreeResolver) HasPath(userPath string) bool {
func (r *FiletreeResolver) HasPath(userPath string) bool {
requestPath, err := r.requestPath(userPath)
if err != nil {
return false
}
return r.tree.HasPath(stereoscopeFile.Path(requestPath))
return r.Tree.HasPath(stereoscopeFile.Path(requestPath))
}
// FilesByPath returns all file.References that match the given paths from the file index.
func (r filetreeResolver) FilesByPath(userPaths ...string) ([]file.Location, error) {
func (r FiletreeResolver) FilesByPath(userPaths ...string) ([]file.Location, error) {
var references = make([]file.Location, 0)
for _, userPath := range userPaths {
@ -51,7 +64,7 @@ func (r filetreeResolver) FilesByPath(userPaths ...string) ([]file.Location, err
}
// we should be resolving symlinks and preserving this information as a AccessPath to the real file
ref, err := r.searchContext.SearchByPath(userStrPath, filetree.FollowBasenameLinks)
ref, err := r.SearchContext.SearchByPath(userStrPath, filetree.FollowBasenameLinks)
if err != nil {
log.Tracef("unable to evaluate symlink for path=%q : %+v", userPath, err)
continue
@ -61,7 +74,7 @@ func (r filetreeResolver) FilesByPath(userPaths ...string) ([]file.Location, err
continue
}
entry, err := r.index.Get(*ref.Reference)
entry, err := r.Index.Get(*ref.Reference)
if err != nil {
log.Warnf("unable to get file by path=%q : %+v", userPath, err)
continue
@ -90,12 +103,12 @@ func (r filetreeResolver) FilesByPath(userPaths ...string) ([]file.Location, err
return references, nil
}
func (r filetreeResolver) requestGlob(pattern string) (string, error) {
return r.chroot.ToNativeGlob(pattern)
func (r FiletreeResolver) requestGlob(pattern string) (string, error) {
return r.Chroot.ToNativeGlob(pattern)
}
// FilesByGlob returns all file.References that match the given path glob pattern from any layer in the image.
func (r filetreeResolver) FilesByGlob(patterns ...string) ([]file.Location, error) {
func (r FiletreeResolver) FilesByGlob(patterns ...string) ([]file.Location, error) {
uniqueFileIDs := stereoscopeFile.NewFileReferenceSet()
uniqueLocations := make([]file.Location, 0)
@ -104,7 +117,7 @@ func (r filetreeResolver) FilesByGlob(patterns ...string) ([]file.Location, erro
if err != nil {
return nil, err
}
refVias, err := r.searchContext.SearchByGlob(requestGlob, filetree.FollowBasenameLinks)
refVias, err := r.SearchContext.SearchByGlob(requestGlob, filetree.FollowBasenameLinks)
if err != nil {
return nil, err
}
@ -112,7 +125,7 @@ func (r filetreeResolver) FilesByGlob(patterns ...string) ([]file.Location, erro
if !refVia.HasReference() || uniqueFileIDs.Contains(*refVia.Reference) {
continue
}
entry, err := r.index.Get(*refVia.Reference)
entry, err := r.Index.Get(*refVia.Reference)
if err != nil {
return nil, fmt.Errorf("unable to get file metadata for reference %s: %w", refVia.RealPath, err)
}
@ -137,7 +150,7 @@ func (r filetreeResolver) FilesByGlob(patterns ...string) ([]file.Location, erro
// RelativeFileByPath fetches a single file at the given path relative to the layer squash of the given reference.
// This is helpful when attempting to find a file that is in the same layer or lower as another file.
func (r *filetreeResolver) RelativeFileByPath(_ file.Location, path string) *file.Location {
func (r *FiletreeResolver) RelativeFileByPath(_ file.Location, path string) *file.Location {
paths, err := r.FilesByPath(path)
if err != nil {
return nil
@ -151,12 +164,12 @@ func (r *filetreeResolver) RelativeFileByPath(_ file.Location, path string) *fil
// FileContentsByLocation fetches file contents for a single file reference relative to a directory.
// If the path does not exist an error is returned.
func (r filetreeResolver) FileContentsByLocation(location file.Location) (io.ReadCloser, error) {
func (r FiletreeResolver) FileContentsByLocation(location file.Location) (io.ReadCloser, error) {
if location.RealPath == "" {
return nil, errors.New("empty path given")
}
entry, err := r.index.Get(location.Reference())
entry, err := r.Index.Get(location.Reference())
if err != nil {
return nil, err
}
@ -166,21 +179,14 @@ func (r filetreeResolver) FileContentsByLocation(location file.Location) (io.Rea
return nil, fmt.Errorf("cannot read contents of non-file %q", location.Reference().RealPath)
}
// RealPath is posix so for windows file resolver we need to translate
// to its true on disk path.
filePath := string(location.Reference().RealPath)
if windows.HostRunningOnWindows() {
filePath = windows.FromPosix(filePath)
return r.Opener(location.Reference())
}
return stereoscopeFile.NewLazyReadCloser(filePath), nil
}
func (r *filetreeResolver) AllLocations(ctx context.Context) <-chan file.Location {
func (r *FiletreeResolver) AllLocations(ctx context.Context) <-chan file.Location {
results := make(chan file.Location)
go func() {
defer close(results)
for _, ref := range r.tree.AllFiles(stereoscopeFile.AllTypes()...) {
for _, ref := range r.Tree.AllFiles(stereoscopeFile.AllTypes()...) {
select {
case <-ctx.Done():
return
@ -192,8 +198,8 @@ func (r *filetreeResolver) AllLocations(ctx context.Context) <-chan file.Locatio
return results
}
func (r *filetreeResolver) FileMetadataByLocation(location file.Location) (file.Metadata, error) {
entry, err := r.index.Get(location.Reference())
func (r *FiletreeResolver) FileMetadataByLocation(location file.Location) (file.Metadata, error) {
entry, err := r.Index.Get(location.Reference())
if err != nil {
return file.Metadata{}, fmt.Errorf("location: %+v : %w", location, os.ErrNotExist)
}
@ -201,11 +207,11 @@ func (r *filetreeResolver) FileMetadataByLocation(location file.Location) (file.
return entry.Metadata, nil
}
func (r *filetreeResolver) FilesByMIMEType(types ...string) ([]file.Location, error) {
func (r *FiletreeResolver) FilesByMIMEType(types ...string) ([]file.Location, error) {
uniqueFileIDs := stereoscopeFile.NewFileReferenceSet()
uniqueLocations := make([]file.Location, 0)
refVias, err := r.searchContext.SearchByMIMEType(types...)
refVias, err := r.SearchContext.SearchByMIMEType(types...)
if err != nil {
return nil, err
}

View File

@ -971,7 +971,7 @@ func Test_directoryResolver_FileContentsByLocation(t *testing.T) {
r, err := NewFromDirectory(".", "")
require.NoError(t, err)
exists, existingPath, err := r.tree.File(stereoscopeFile.Path(filepath.Join(cwd, "test-fixtures/image-simple/file-1.txt")))
exists, existingPath, err := r.Tree.File(stereoscopeFile.Path(filepath.Join(cwd, "test-fixtures/image-simple/file-1.txt")))
require.True(t, exists)
require.NoError(t, err)
require.True(t, existingPath.HasReference())
@ -1271,7 +1271,7 @@ func TestDirectoryResolver_FilesContents_errorOnDirRequest(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for loc := range resolver.AllLocations(ctx) {
entry, err := resolver.index.Get(loc.Reference())
entry, err := resolver.Index.Get(loc.Reference())
require.NoError(t, err)
if entry.Metadata.IsDir() {
dirLoc = &loc
@ -1500,7 +1500,7 @@ func Test_fileResolver_FileContentsByLocation(t *testing.T) {
r, err := NewFromFile(parentPath, filePath)
require.NoError(t, err)
exists, existingPath, err := r.tree.File(stereoscopeFile.Path(filepath.Join(cwd, "test-fixtures/image-simple/file-1.txt")))
exists, existingPath, err := r.Tree.File(stereoscopeFile.Path(filepath.Join(cwd, "test-fixtures/image-simple/file-1.txt")))
require.True(t, exists)
require.NoError(t, err)
require.True(t, existingPath.HasReference())
@ -1553,7 +1553,7 @@ func TestFileResolver_AllLocations_errorOnDirRequest(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for loc := range resolver.AllLocations(ctx) {
entry, err := resolver.index.Get(loc.Reference())
entry, err := resolver.Index.Get(loc.Reference())
require.NoError(t, err)
if dirLoc == nil && entry.Metadata.IsDir() {
dirLoc = &loc

View File

@ -6,5 +6,5 @@ import "github.com/anchore/syft/syft/source"
// AllTypes returns a list of all source metadata types that syft supports (that are represented in the source.Description.Metadata field).
func AllTypes() []any {
return []any{source.DirectoryMetadata{}, source.FileMetadata{}, source.ImageMetadata{}}
return []any{source.DirectoryMetadata{}, source.FileMetadata{}, source.ImageMetadata{}, source.SnapMetadata{}}
}

View File

@ -11,6 +11,7 @@ var jsonNameFromType = map[reflect.Type][]string{
reflect.TypeOf(source.DirectoryMetadata{}): {"directory", "dir"},
reflect.TypeOf(source.FileMetadata{}): {"file"},
reflect.TypeOf(source.ImageMetadata{}): {"image"},
reflect.TypeOf(source.SnapMetadata{}): {"snap"},
}
func AllTypeNames() []string {

View File

@ -4,6 +4,9 @@ import (
"bytes"
"fmt"
"io"
"sync"
"github.com/diskfs/go-diskfs/filesystem/squashfs"
macho "github.com/anchore/go-macholibre"
"github.com/anchore/syft/internal/log"
@ -48,12 +51,15 @@ func GetUnionReader(readerCloser io.ReadCloser) (UnionReader, error) {
// file.LocationReadCloser embeds a ReadCloser, which is likely
// to implement UnionReader. Check whether the embedded read closer
// implements UnionReader, and just return that if so.
r, ok := readerCloser.(file.LocationReadCloser)
if ok {
ur, ok := r.ReadCloser.(UnionReader)
if ok {
return ur, nil
if r, ok := readerCloser.(file.LocationReadCloser); ok {
return GetUnionReader(r.ReadCloser)
}
if r, ok := readerCloser.(*squashfs.File); ok {
// seeking is implemented, but not io.ReaderAt. Lets wrap it to prevent from degrading performance
// by copying all data.
return newReaderAtAdapter(r), nil
}
b, err := io.ReadAll(readerCloser)
@ -75,3 +81,58 @@ func GetUnionReader(readerCloser io.ReadCloser) (UnionReader, error) {
return reader, nil
}
// readerAtAdapter adds an io.ReaderAt implementation on top of an
// io.ReadSeekCloser (e.g. *squashfs.File) that can seek but cannot do
// random-access reads natively. All operations share a single mutex because
// ReadAt is emulated via seeking, which mutates the underlying reader's
// position (see ReadAt).
type readerAtAdapter struct {
	io.ReadSeekCloser
	mu *sync.Mutex
}
// newReaderAtAdapter wraps rs so that it also satisfies io.ReaderAt, allowing
// it to be used as a UnionReader without buffering the entire content.
func newReaderAtAdapter(rs io.ReadSeekCloser) UnionReader {
	adapter := readerAtAdapter{
		ReadSeekCloser: rs,
		mu:             &sync.Mutex{},
	}
	return &adapter
}
// Read delegates to the wrapped reader while holding the shared lock so it
// cannot interleave with an in-flight ReadAt.
func (r *readerAtAdapter) Read(p []byte) (int, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	return r.ReadSeekCloser.Read(p)
}
// Seek delegates to the wrapped reader while holding the shared lock so it
// cannot interleave with an in-flight ReadAt.
func (r *readerAtAdapter) Seek(offset int64, whence int) (int64, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	return r.ReadSeekCloser.Seek(offset, whence)
}
// ReadAt implements io.ReaderAt by emulating a random-access read under the
// shared lock: save the current offset, seek to off, perform a single Read,
// then restore the original offset so interleaved Read/Seek callers are not
// disturbed.
//
// Note: since a single Read call is used, this may return n < len(p) with a
// nil error (as exercised by the "partial read" test case), which is looser
// than the strict io.ReaderAt full-buffer guarantee.
func (r *readerAtAdapter) ReadAt(p []byte, off int64) (n int, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	currentPos, err := r.ReadSeekCloser.Seek(0, io.SeekCurrent) // save current pos
	if err != nil {
		return 0, err
	}

	_, err = r.ReadSeekCloser.Seek(off, io.SeekStart) // seek to absolute position `off`
	if err != nil {
		return 0, err
	}

	n, err = r.ReadSeekCloser.Read(p) // read from that absolute position

	// restore the position for the stateful read/seek operations
	if restoreErr := r.restorePosition(currentPos); restoreErr != nil {
		// surface the restore failure, but never mask the read error
		if err == nil {
			err = restoreErr
		}
	}

	return n, err
}
// restorePosition seeks the wrapped reader back to the given absolute offset.
func (r *readerAtAdapter) restorePosition(pos int64) error {
	if _, err := r.ReadSeekCloser.Seek(pos, io.SeekStart); err != nil {
		return err
	}
	return nil
}

View File

@ -1,8 +1,10 @@
package unionreader
import (
"bytes"
"io"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/assert"
@ -61,3 +63,275 @@ func Test_getUnionReader_fileLocationReadCloser(t *testing.T) {
require.NoError(t, err)
require.Equal(t, p, ur)
}
// TestReaderAtAdapter_ReadAt covers basic offset reads, EOF/short-read edge
// cases, repeatability of reads at the same offset, and mutex-guarded
// concurrent access.
func TestReaderAtAdapter_ReadAt(t *testing.T) {
	testData := "Hello, World! This is a test string for ReadAt."

	t.Run("basic functionality", func(t *testing.T) {
		reader := newReadSeekCloser(strings.NewReader(testData))
		adapter := newReaderAtAdapter(reader)

		tests := []struct {
			name     string
			offset   int64
			length   int
			expected string
		}{
			{name: "read from beginning", offset: 0, length: 5, expected: "Hello"},
			{name: "read from middle", offset: 7, length: 5, expected: "World"},
			{name: "read from end", offset: int64(len(testData) - 4), length: 4, expected: "dAt."},
			{name: "read single character", offset: 12, length: 1, expected: "!"},
		}

		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				buf := make([]byte, tt.length)
				n, err := adapter.ReadAt(buf, tt.offset)
				if err != nil && err != io.EOF {
					t.Fatalf("Unexpected error: %v", err)
				}
				result := string(buf[:n])
				if result != tt.expected {
					t.Errorf("Expected %q, got %q", tt.expected, result)
				}
			})
		}
	})

	t.Run("edge cases", func(t *testing.T) {
		tests := []struct {
			name        string
			data        string
			offset      int64
			bufSize     int
			expectedN   int
			expectedErr error
			expectedStr string
		}{
			{
				name:        "beyond EOF",
				data:        "Hello",
				offset:      10,
				bufSize:     5,
				expectedN:   0,
				expectedErr: io.EOF,
				expectedStr: "",
			},
			{
				// note: a partial read yields a nil error (single Read call
				// semantics), not io.EOF
				name:        "partial read",
				data:        "Hello",
				offset:      2,
				bufSize:     10,
				expectedN:   3,
				expectedErr: nil,
				expectedStr: "llo",
			},
			{
				name:        "empty buffer",
				data:        "Hello",
				offset:      0,
				bufSize:     0,
				expectedN:   0,
				expectedErr: nil,
				expectedStr: "",
			},
		}

		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				reader := newReadSeekCloser(strings.NewReader(tt.data))
				adapter := newReaderAtAdapter(reader)

				buf := make([]byte, tt.bufSize)
				n, err := adapter.ReadAt(buf, tt.offset)

				if err != tt.expectedErr {
					t.Errorf("Expected error %v, got %v", tt.expectedErr, err)
				}
				if n != tt.expectedN {
					t.Errorf("Expected %d bytes read, got %d", tt.expectedN, n)
				}
				result := string(buf[:n])
				if result != tt.expectedStr {
					t.Errorf("Expected %q, got %q", tt.expectedStr, result)
				}
			})
		}
	})

	t.Run("multiple reads from same position", func(t *testing.T) {
		reader := newReadSeekCloser(strings.NewReader(testData))
		adapter := newReaderAtAdapter(reader)

		// read the same data multiple times (ReadAt must not consume position)
		for i := 0; i < 3; i++ {
			buf := make([]byte, 5)
			n, err := adapter.ReadAt(buf, 7)
			if err != nil && err != io.EOF {
				t.Fatalf("ReadAt %d failed: %v", i, err)
			}
			result := string(buf[:n])
			if result != "World" {
				t.Errorf("ReadAt %d: expected 'World', got %q", i, result)
			}
		}
	})

	t.Run("concurrent access", func(t *testing.T) {
		td := "0123456789abcdefghijklmnopqrstuvwxyz"
		reader := newReadSeekCloser(strings.NewReader(td))
		adapter := newReaderAtAdapter(reader)

		const numGoroutines = 10
		const numReads = 100

		var wg sync.WaitGroup
		results := make(chan bool, numGoroutines*numReads)

		// each goroutine repeatedly reads one byte at a fixed offset; the
		// internal mutex must keep every read consistent
		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(goroutineID int) {
				defer wg.Done()
				for j := 0; j < numReads; j++ {
					offset := int64(goroutineID % len(td))
					buf := make([]byte, 1)
					n, err := adapter.ReadAt(buf, offset)
					if err != nil && err != io.EOF {
						results <- false
						return
					}
					if n > 0 {
						expected := td[offset]
						if buf[0] != expected {
							results <- false
							return
						}
					}
					results <- true
				}
			}(i)
		}

		wg.Wait()
		close(results)

		successCount := 0
		totalCount := 0
		for success := range results {
			totalCount++
			if success {
				successCount++
			}
		}

		if successCount != totalCount {
			t.Errorf("Concurrent reads failed: %d/%d successful", successCount, totalCount)
		}
	})
}
// TestReaderAtAdapter_PositionHandling verifies that ReadAt is stateless with
// respect to the reader's seek position: it neither moves the position nor
// disturbs an in-progress sequence of regular Read calls.
func TestReaderAtAdapter_PositionHandling(t *testing.T) {
	testData := "Hello, World!"

	t.Run("preserves position after ReadAt", func(t *testing.T) {
		reader := newReadSeekCloser(strings.NewReader(testData))
		adapter := newReaderAtAdapter(reader)

		// move to a specific position
		initialPos := int64(7)
		_, err := adapter.Seek(initialPos, io.SeekStart)
		if err != nil {
			t.Fatalf("Failed to seek: %v", err)
		}

		// read using ReadAt
		buf := make([]byte, 5)
		_, err = adapter.ReadAt(buf, 0)
		if err != nil && err != io.EOF {
			t.Fatalf("ReadAt failed: %v", err)
		}

		// verify position is preserved
		currentPos, err := adapter.Seek(0, io.SeekCurrent)
		if err != nil {
			t.Fatalf("Failed to get current position: %v", err)
		}

		if currentPos != initialPos {
			t.Errorf("Position not preserved. Expected %d, got %d", initialPos, currentPos)
		}
	})

	t.Run("does not affect regular reads", func(t *testing.T) {
		reader := newReadSeekCloser(strings.NewReader(testData))
		adapter := newReaderAtAdapter(reader)

		// read first few bytes normally
		normalBuf := make([]byte, 5)
		n, err := adapter.Read(normalBuf)
		if err != nil {
			t.Fatalf("Normal read failed: %v", err)
		}
		if string(normalBuf[:n]) != "Hello" {
			t.Errorf("Expected 'Hello', got %q", string(normalBuf[:n]))
		}

		// use ReadAt to read from a different position
		readAtBuf := make([]byte, 5)
		n, err = adapter.ReadAt(readAtBuf, 7)
		if err != nil && err != io.EOF {
			t.Fatalf("ReadAt failed: %v", err)
		}
		if string(readAtBuf[:n]) != "World" {
			t.Errorf("Expected 'World', got %q", string(readAtBuf[:n]))
		}

		// continue normal reading - should pick up where we left off
		continueBuf := make([]byte, 2)
		n, err = adapter.Read(continueBuf)
		if err != nil {
			t.Fatalf("Continue read failed: %v", err)
		}
		if string(continueBuf[:n]) != ", " {
			t.Errorf("Expected ', ', got %q", string(continueBuf[:n]))
		}
	})
}
// TestReaderAtAdapter_Close verifies that closing the adapter propagates to
// the wrapped reader.
func TestReaderAtAdapter_Close(t *testing.T) {
	underlying := newReadSeekCloser(bytes.NewReader([]byte("test data")))
	adapter := newReaderAtAdapter(underlying)

	// test that adapter can be closed
	if err := adapter.Close(); err != nil {
		t.Errorf("Close failed: %v", err)
	}

	if !underlying.closed {
		t.Error("Underlying reader was not closed")
	}
}
// readSeekCloser is a test double that adds a no-op Close (recording that it
// was called) on top of any io.ReadSeeker.
type readSeekCloser struct {
	io.ReadSeeker
	closed bool // set to true once Close has been called
}
// newReadSeekCloser wraps rs in a closable test double.
func newReadSeekCloser(rs io.ReadSeeker) *readSeekCloser {
	c := readSeekCloser{ReadSeeker: rs}
	return &c
}
// Close records that the reader was closed; it never fails.
func (r *readSeekCloser) Close() error {
	r.closed = true
	return nil
}

View File

@ -4,7 +4,6 @@ import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"path"
"path/filepath"
@ -16,6 +15,7 @@ import (
"github.com/anchore/syft/internal"
"github.com/anchore/syft/internal/licenses"
"github.com/anchore/syft/internal/log"
"github.com/anchore/syft/internal/unknown"
"github.com/anchore/syft/syft/artifact"
"github.com/anchore/syft/syft/file"
"github.com/anchore/syft/syft/pkg"
@ -26,17 +26,15 @@ import (
// fields are governed by the PyPA core metadata specification (https://packaging.python.org/en/latest/specifications/core-metadata/).
func parseWheelOrEgg(ctx context.Context, resolver file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) {
pd, sources, err := assembleEggOrWheelMetadata(resolver, reader.Location)
if err != nil {
return nil, nil, err
}
if pd == nil {
return nil, nil, nil
return nil, nil, err
}
// This can happen for Python 2.7 where it is reported from an egg-info, but Python is
// the actual runtime, it isn't a "package". The special-casing here allows to skip it
if pd.Name == "Python" {
return nil, nil, nil
return nil, nil, err
}
pkgs := []pkg.Package{
@ -47,11 +45,11 @@ func parseWheelOrEgg(ctx context.Context, resolver file.Resolver, _ *generic.Env
),
}
return pkgs, nil, nil
return pkgs, nil, err
}
// fetchInstalledFiles finds a corresponding installed-files.txt file for the given python package metadata file and returns the set of file records contained.
func fetchInstalledFiles(resolver file.Resolver, metadataLocation file.Location, sitePackagesRootPath string) (files []pkg.PythonFileRecord, sources []file.Location, err error) {
func fetchInstalledFiles(resolver file.Resolver, metadataLocation file.Location, sitePackagesRootPath string) (files []pkg.PythonFileRecord, sources []file.Location, retErr error) {
// we've been given a file reference to a specific wheel METADATA file. note: this may be for a directory
// or for an image... for an image the METADATA file may be present within multiple layers, so it is important
// to reconcile the installed-files.txt path to the same layer (or the next adjacent lower layer).
@ -72,8 +70,7 @@ func fetchInstalledFiles(resolver file.Resolver, metadataLocation file.Location,
// parse the installed-files contents
installedFiles, err := parseInstalledFiles(installedFilesContents, metadataLocation.RealPath, sitePackagesRootPath)
if err != nil {
log.WithFields("error", err, "path", metadataLocation.RealPath).Trace("unable to parse installed-files.txt for python package")
return files, sources, nil
retErr = unknown.Newf(*installedFilesRef, "unable to parse installed-files.txt for python package: %w", retErr)
}
files = append(files, installedFiles...)
@ -82,7 +79,7 @@ func fetchInstalledFiles(resolver file.Resolver, metadataLocation file.Location,
}
// fetchRecordFiles finds a corresponding RECORD file for the given python package metadata file and returns the set of file records contained.
func fetchRecordFiles(resolver file.Resolver, metadataLocation file.Location) (files []pkg.PythonFileRecord, sources []file.Location, err error) {
func fetchRecordFiles(resolver file.Resolver, metadataLocation file.Location) (files []pkg.PythonFileRecord, sources []file.Location, retErr error) {
// we've been given a file reference to a specific wheel METADATA file. note: this may be for a directory
// or for an image... for an image the METADATA file may be present within multiple layers, so it is important
// to reconcile the RECORD path to the same layer (or the next adjacent lower layer).
@ -101,11 +98,12 @@ func fetchRecordFiles(resolver file.Resolver, metadataLocation file.Location) (f
defer internal.CloseAndLogError(recordContents, recordPath)
// parse the record contents
records := parseWheelOrEggRecord(recordContents)
var records []pkg.PythonFileRecord
records, retErr = parseWheelOrEggRecord(file.NewLocationReadCloser(*recordRef, recordContents))
files = append(files, records...)
}
return files, sources, nil
return files, sources, retErr
}
// fetchTopLevelPackages finds a corresponding top_level.txt file for the given python package metadata file and returns the set of package names contained.
@ -133,7 +131,7 @@ func fetchTopLevelPackages(resolver file.Resolver, metadataLocation file.Locatio
}
if err := scanner.Err(); err != nil {
return nil, nil, fmt.Errorf("could not read python package top_level.txt: %w", err)
return nil, nil, err
}
return pkgs, sources, nil
@ -216,14 +214,15 @@ func assembleEggOrWheelMetadata(resolver file.Resolver, metadataLocation file.Lo
}
// attach any python files found for the given wheel/egg installation
var errs error
r, s, err := fetchRecordFiles(resolver, metadataLocation)
if err != nil {
return nil, nil, err
errs = unknown.Joinf(errs, "could not read python package RECORD file: %w", err)
}
if len(r) == 0 {
r, s, err = fetchInstalledFiles(resolver, metadataLocation, pd.SitePackagesRootPath)
if err != nil {
return nil, nil, err
errs = unknown.Joinf(errs, "could not read python package installed-files.txt: %w", err)
}
}
@ -233,7 +232,7 @@ func assembleEggOrWheelMetadata(resolver file.Resolver, metadataLocation file.Lo
// attach any top-level package names found for the given wheel/egg installation
p, s, err := fetchTopLevelPackages(resolver, metadataLocation)
if err != nil {
return nil, nil, err
errs = unknown.Joinf(errs, "could not read python package top_level.txt: %w", err)
}
sources = append(sources, s...)
pd.TopLevelPackages = p
@ -241,12 +240,12 @@ func assembleEggOrWheelMetadata(resolver file.Resolver, metadataLocation file.Lo
// attach any direct-url package data found for the given wheel/egg installation
d, s, err := fetchDirectURLData(resolver, metadataLocation)
if err != nil {
return nil, nil, err
errs = unknown.Joinf(errs, "could not read python package direct_url.json: %w", err)
}
sources = append(sources, s...)
pd.DirectURLOrigin = d
return &pd, sources, nil
return &pd, sources, errs
}
func findLicenses(ctx context.Context, resolver file.Resolver, m parsedData) pkg.LicenseSet {

View File

@ -10,25 +10,33 @@ import (
"strings"
"github.com/anchore/syft/internal/log"
"github.com/anchore/syft/internal/unknown"
"github.com/anchore/syft/syft/file"
"github.com/anchore/syft/syft/pkg"
)
// parseWheelOrEggRecord takes a Python Egg or Wheel (which share the same format and values for our purposes),
// returning all Python packages listed.
func parseWheelOrEggRecord(reader io.Reader) []pkg.PythonFileRecord {
func parseWheelOrEggRecord(reader file.LocationReadCloser) ([]pkg.PythonFileRecord, error) {
var records []pkg.PythonFileRecord
r := csv.NewReader(reader)
for {
recordList, err := r.Read()
if err != nil {
if errors.Is(err, io.EOF) {
break
}
if err != nil {
log.Debugf("unable to read python record file: %w", err)
var parseErr *csv.ParseError
if errors.As(err, &parseErr) {
log.WithFields("error", parseErr).Debug("unable to read python record entry (skipping entry)")
continue
}
// probably an I/O error... we could have missed some package content, so we include this location as an unknown
return records, unknown.Newf(reader.Coordinates, "unable to read python record file: %w", err)
}
if len(recordList) != 3 {
log.Debugf("python record an unexpected length=%d: %q", len(recordList), recordList)
continue
@ -62,7 +70,7 @@ func parseWheelOrEggRecord(reader io.Reader) []pkg.PythonFileRecord {
records = append(records, record)
}
return records
return records, nil
}
func parseInstalledFiles(reader io.Reader, location, sitePackagesRootPath string) ([]pkg.PythonFileRecord, error) {

View File

@ -5,7 +5,9 @@ import (
"testing"
"github.com/go-test/deep"
"github.com/stretchr/testify/require"
"github.com/anchore/syft/syft/file"
"github.com/anchore/syft/syft/pkg"
)
@ -46,7 +48,8 @@ func TestParseWheelEggRecord(t *testing.T) {
t.Fatalf("failed to open fixture: %+v", err)
}
actual := parseWheelOrEggRecord(fixture)
actual, err := parseWheelOrEggRecord(file.NewLocationReadCloser(file.NewLocation(test.Fixture), fixture))
require.NoError(t, err, "failed to parse: %+v", err)
for _, d := range deep.Equal(actual, test.ExpectedMetadata) {
t.Errorf("diff: %+v", d)

View File

@ -0,0 +1,23 @@
package source
import "github.com/anchore/syft/syft/file"
// SnapMetadata is the source description metadata recorded for a scanned snap
// package: descriptive fields taken from the snap's manifest plus digests of
// the snap (squashfs) file itself.
type SnapMetadata struct {
	// Summary is a brief description of the snap package
	Summary string `yaml:"summary" json:"summary,omitempty"`

	// Base is the base snap this package builds upon
	Base string `yaml:"base" json:"base,omitempty"`

	// Grade is the development stage (stable, candidate, beta, edge)
	Grade string `yaml:"grade" json:"grade,omitempty"`

	// Confinement is the security isolation level (strict, classic, devmode)
	Confinement string `yaml:"confinement" json:"confinement,omitempty"`

	// Architectures are the supported CPU architectures
	Architectures []string `yaml:"architectures" json:"architectures,omitempty"`

	// Digests are hashes of the snap squashfs files
	Digests []file.Digest `yaml:"digests" json:"digests,omitempty"`
}

View File

@ -0,0 +1,56 @@
package snapsource
import (
"fmt"
"github.com/goccy/go-yaml"
"github.com/anchore/syft/internal"
"github.com/anchore/syft/syft/file"
)
// snapManifest is the subset of fields decoded from a snap's metadata file
// that syft cares about.
type snapManifest struct {
	Name          string   `yaml:"name"`
	Version       string   `yaml:"version"`
	Summary       string   `yaml:"summary"`
	Base          string   `yaml:"base"`
	Grade         string   `yaml:"grade"`
	Confinement   string   `yaml:"confinement"`
	Architectures []string `yaml:"architectures"`
}

// manifestLocation is the well-known path of the snap metadata file within
// the snap's squashfs filesystem.
const manifestLocation = "/meta/snap.yaml"
// parseManifest locates and decodes the snap manifest (/meta/snap.yaml) via
// the given resolver, requiring exactly one manifest that contains at least a
// name and version.
func parseManifest(resolver file.Resolver) (*snapManifest, error) {
	locations, err := resolver.FilesByPath(manifestLocation)
	if err != nil {
		return nil, fmt.Errorf("unable to find snap manifest file: %w", err)
	}

	switch len(locations) {
	case 0:
		return nil, fmt.Errorf("no snap manifest file found")
	case 1:
		// exactly one manifest, as expected
	default:
		return nil, fmt.Errorf("multiple snap manifest files found")
	}

	manifestFile := locations[0]

	reader, err := resolver.FileContentsByLocation(manifestFile)
	if err != nil {
		return nil, fmt.Errorf("unable to read snap manifest file: %w", err)
	}
	defer internal.CloseAndLogError(reader, manifestFile.RealPath)

	var manifest snapManifest
	if err = yaml.NewDecoder(reader).Decode(&manifest); err != nil {
		return nil, fmt.Errorf("unable to decode snap manifest file: %w", err)
	}

	// a manifest without these fields cannot meaningfully describe a source
	if manifest.Name == "" || manifest.Version == "" {
		return nil, fmt.Errorf("invalid snap manifest file: missing name or version")
	}

	return &manifest, nil
}

View File

@ -0,0 +1,249 @@
package snapsource
import (
"context"
"crypto"
"fmt"
"os"
"path"
"path/filepath"
"strings"
"github.com/spf13/afero"
stereoFile "github.com/anchore/stereoscope/pkg/file"
"github.com/anchore/syft/internal/bus"
intFile "github.com/anchore/syft/internal/file"
"github.com/anchore/syft/internal/log"
"github.com/anchore/syft/syft/event/monitor"
"github.com/anchore/syft/syft/file"
)
// snapFile describes a snap (squashfs) file on disk that is ready to be
// cataloged.
type snapFile struct {
	// Path is the path to the squashfs file on disk
	Path string
	// Digests are hashes computed over the file contents (per the configured algorithms)
	Digests []file.Digest
	// MimeType is the MIME type detected from the file contents
	MimeType string
	// Cleanup releases any temporary resources backing this file (nil when the
	// file is user-owned and must not be deleted)
	Cleanup func() error
}
// remoteSnap pairs a snap identity with the store URL it resolved to.
type remoteSnap struct {
	snapIdentity
	// URL is the direct download location for the snap file
	URL string
}
// snapIdentity identifies a snap by name, plus the optional channel and
// architecture used when resolving it from the store.
type snapIdentity struct {
	Name         string
	Channel      string
	Architecture string
}

// String renders the identity as name[@channel][ (architecture)].
func (s snapIdentity) String() string {
	var sb strings.Builder
	sb.WriteString(s.Name)
	if s.Channel != "" {
		sb.WriteString(fmt.Sprintf("@%s", s.Channel))
	}
	if s.Architecture != "" {
		sb.WriteString(fmt.Sprintf(" (%s)", s.Architecture))
	}
	return sb.String()
}
// getRemoteSnapFile resolves the requested snap against the store and
// downloads it, returning a handle to the resulting squashfs file.
func getRemoteSnapFile(ctx context.Context, fs afero.Fs, getter intFile.Getter, cfg Config) (*snapFile, error) {
	if cfg.Request == "" {
		return nil, fmt.Errorf("invalid request: %q", cfg.Request)
	}

	// an explicit platform selection narrows which store build is fetched
	arch := ""
	if cfg.Platform != nil {
		arch = cfg.Platform.Architecture
	}

	info, err := resolveRemoteSnap(cfg.Request, arch)
	if err != nil {
		return nil, err
	}

	return newSnapFileFromRemote(ctx, fs, cfg, getter, info)
}
// newSnapFileFromRemote downloads the resolved snap into a new temp directory
// and returns a snapFile handle whose Cleanup removes that directory.
//
// Fix: the temp directory is now removed eagerly on any failure (download or
// file inspection) — previously error paths returned before the cleanup
// closure was attached to the snapFile, leaking the directory and any
// partially-downloaded content.
func newSnapFileFromRemote(ctx context.Context, fs afero.Fs, cfg Config, getter intFile.Getter, info *remoteSnap) (*snapFile, error) {
	tempDir, err := afero.TempDir(fs, "", "syft-snap-")
	if err != nil {
		return nil, fmt.Errorf("failed to create temp directory: %w", err)
	}

	cleanup := func() error {
		return fs.RemoveAll(tempDir)
	}

	snapFilePath := path.Join(tempDir, path.Base(info.URL))
	if err = downloadSnap(getter, info, snapFilePath); err != nil {
		_ = cleanup() // best effort: don't leak the temp dir on failure
		return nil, fmt.Errorf("failed to download snap file: %w", err)
	}

	mimeType, digests, err := getSnapFileInfo(ctx, fs, snapFilePath, cfg.DigestAlgorithms)
	if err != nil {
		_ = cleanup() // best effort: don't leak the temp dir on failure
		return nil, err
	}

	return &snapFile{
		Path:     snapFilePath,
		Digests:  digests,
		MimeType: mimeType,
		Cleanup:  cleanup,
	}, nil
}
// newSnapFromFile wraps an existing local snap file (no download) after
// validating it and computing any requested digests. The returned handle has
// no Cleanup since the file belongs to the user.
func newSnapFromFile(ctx context.Context, fs afero.Fs, cfg Config) (*snapFile, error) {
	// an architecture selection only makes sense when resolving against the
	// store, not for a file already on disk
	if cfg.Platform != nil && cfg.Platform.Architecture != "" {
		return nil, fmt.Errorf("architecture cannot be specified for local snap files: %q", cfg.Request)
	}

	absPath, err := filepath.Abs(cfg.Request)
	if err != nil {
		return nil, fmt.Errorf("unable to get absolute path of snap: %w", err)
	}

	mimeType, digests, err := getSnapFileInfo(ctx, fs, absPath, cfg.DigestAlgorithms)
	if err != nil {
		return nil, err
	}

	return &snapFile{
		Path:     absPath,
		Digests:  digests,
		MimeType: mimeType,
		// note: we have no closer since this is the user's file (never delete it)
	}, nil
}
// getSnapFileInfo validates that the given path is a regular squashfs/snap
// file and returns its detected MIME type plus digests for the requested
// hash algorithms (digests are skipped when no hashes are configured).
func getSnapFileInfo(ctx context.Context, fs afero.Fs, path string, hashes []crypto.Hash) (string, []file.Digest, error) {
	fileMeta, err := fs.Stat(path)
	if err != nil {
		return "", nil, fmt.Errorf("unable to stat path=%q: %w", path, err)
	}

	if fileMeta.IsDir() {
		return "", nil, fmt.Errorf("given path is a directory, not a snap file: %q", path)
	}

	fh, err := fs.Open(path)
	if err != nil {
		return "", nil, fmt.Errorf("unable to open file=%q: %w", path, err)
	}
	defer fh.Close()

	mimeType := stereoFile.MIMEType(fh)
	if !isSquashFSFile(mimeType, path) {
		return "", nil, fmt.Errorf("not a valid squashfs/snap file: %q (mime-type=%q)", path, mimeType)
	}

	var digests []file.Digest
	if len(hashes) > 0 {
		// MIME detection consumed bytes from the handle, so rewind before
		// hashing the full file contents
		if _, err := fh.Seek(0, 0); err != nil {
			return "", nil, fmt.Errorf("unable to reset file position: %w", err)
		}
		digests, err = intFile.NewDigestsFromFile(ctx, fh, hashes)
		if err != nil {
			return "", nil, fmt.Errorf("unable to calculate digests for file=%q: %w", path, err)
		}
	}

	return mimeType, digests, nil
}
// resolveRemoteSnap parses a snap request and returns the appropriate path or URL
// The request can be:
// - A snap name (e.g., "etcd")
// - A snap name with channel (e.g., "etcd@beta" or "etcd@2.3/stable")
func resolveRemoteSnap(request, architecture string) (*remoteSnap, error) {
	if architecture == "" {
		architecture = defaultArchitecture
	}

	name, channel := parseSnapRequest(request)

	id := snapIdentity{
		Name:         name,
		Channel:      channel,
		Architecture: architecture,
	}

	downloadURL, err := newSnapcraftClient().GetSnapDownloadURL(id)
	if err != nil {
		return nil, err
	}

	log.WithFields("url", downloadURL, "name", name, "channel", channel, "architecture", architecture).Debugf("snap resolved")

	return &remoteSnap{
		snapIdentity: id,
		URL:          downloadURL,
	}, nil
}
// parseSnapRequest parses a snap request into name and channel
// Examples:
// - "etcd" -> name="etcd", channel="stable" (default)
// - "etcd@beta" -> name="etcd", channel="beta"
// - "etcd@2.3/stable" -> name="etcd", channel="2.3/stable"
func parseSnapRequest(request string) (name, channel string) {
	// split on the first "@" only, so channels containing "/" stay intact
	name, channel, _ = strings.Cut(request, "@")
	if channel == "" {
		channel = defaultChannel
	}
	return name, channel
}
// downloadSnap fetches the snap file from info.URL into dest, reporting
// progress on the event bus (total size is unknown up front, hence -1).
func downloadSnap(getter intFile.Getter, info *remoteSnap, dest string) error {
	log.WithFields("url", info.URL, "destination", dest).Debug("downloading snap file")

	prog := bus.StartPullSourceTask(monitor.GenericTask{
		Title: monitor.Title{
			Default:      "Download snap",
			WhileRunning: "Downloading snap",
			OnSuccess:    "Downloaded snap",
		},
		HideOnSuccess:      false,
		HideStageOnSuccess: true,
		ID:                 "",
		ParentID:           "",
		Context:            info.String(),
	}, -1, "")

	// a failed download is also recorded on the progress object so consumers
	// of the event bus can surface the error state
	if err := getter.GetFile(dest, info.URL, prog.Manual); err != nil {
		prog.SetError(err)
		return fmt.Errorf("failed to download snap file at %q: %w", info.URL, err)
	}

	prog.SetCompleted()
	return nil
}
// fileExists checks if a file exists and is not a directory
func fileExists(fs afero.Fs, path string) bool {
	info, err := fs.Stat(path)
	switch {
	case os.IsNotExist(err):
		return false
	case err != nil:
		// any other stat failure is treated as "not usable"
		return false
	default:
		return !info.IsDir()
	}
}

View File

@ -0,0 +1,378 @@
package snapsource
import (
"context"
"crypto"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"github.com/OneOfOne/xxhash"
diskFile "github.com/diskfs/go-diskfs/backend/file"
"github.com/diskfs/go-diskfs/filesystem"
"github.com/diskfs/go-diskfs/filesystem/squashfs"
"github.com/hashicorp/go-cleanhttp"
"github.com/opencontainers/go-digest"
"github.com/spf13/afero"
"github.com/anchore/clio"
"github.com/anchore/go-homedir"
stereoFile "github.com/anchore/stereoscope/pkg/file"
"github.com/anchore/stereoscope/pkg/filetree"
"github.com/anchore/stereoscope/pkg/image"
"github.com/anchore/syft/internal/bus"
intFile "github.com/anchore/syft/internal/file"
"github.com/anchore/syft/internal/log"
"github.com/anchore/syft/syft/artifact"
"github.com/anchore/syft/syft/event/monitor"
"github.com/anchore/syft/syft/file"
"github.com/anchore/syft/syft/internal/fileresolver"
"github.com/anchore/syft/syft/source"
"github.com/anchore/syft/syft/source/internal"
)
var _ source.Source = (*snapSource)(nil)
// Config holds everything needed to create a snap source, whether from a
// local file path or a name to resolve against the snapcraft store.
type Config struct {
	// ID identifies the calling application (passed to the file getter used for downloads)
	ID clio.Identification
	// Request is the user input: a local snap file path or a snap name (optionally with @channel)
	Request string
	// Platform optionally selects the architecture when resolving a remote snap
	Platform *image.Platform
	// Exclude holds source exclusion configuration (applied during cataloging)
	Exclude source.ExcludeConfig
	// DigestAlgorithms are the hashes to compute over the snap file
	DigestAlgorithms []crypto.Hash
	// Alias optionally overrides the name/version reported for the source
	Alias source.Alias

	fs afero.Fs // overridable for testing; defaults to the OS filesystem when nil
}
// snapSource is a source.Source implementation backed by a snap (squashfs)
// file, either provided locally or downloaded from the snapcraft store.
type snapSource struct {
	id       artifact.ID
	config   Config
	resolver file.Resolver // resolver over the squashfs contents (guarded by mutex)
	mutex    *sync.Mutex
	manifest snapManifest // parsed contents of the snap's manifest file
	digests  []file.Digest

	fs               filesystem.FileSystem
	squashfsPath     string       // path to the snap squashfs file on disk
	squashFileCloser func() error // closes the opened squashfs backing file, if any
	closer           func() error // removes temp download artifacts (nil for user-provided files)
}
// NewFromLocal creates a snap source from a snap file already on disk.
func NewFromLocal(cfg Config) (source.Source, error) {
	snap, err := getLocalSnapFile(&cfg)
	if err != nil {
		return nil, err
	}
	return newFromPath(cfg, snap)
}
// getLocalSnapFile normalizes the requested path (home expansion + cleaning)
// and verifies it refers to an existing, non-directory snap file. Note: cfg
// is mutated — Request is replaced with the cleaned path and fs is defaulted
// to the OS filesystem when unset.
func getLocalSnapFile(cfg *Config) (*snapFile, error) {
	expandedPath, err := homedir.Expand(cfg.Request)
	if err != nil {
		return nil, fmt.Errorf("unable to expand path %q: %w", cfg.Request, err)
	}
	cfg.Request = filepath.Clean(expandedPath)

	if cfg.fs == nil {
		cfg.fs = afero.NewOsFs()
	}

	if !fileExists(cfg.fs, cfg.Request) {
		return nil, fmt.Errorf("snap file %q does not exist", cfg.Request)
	}

	log.WithFields("path", cfg.Request).Debug("snap is a local file")

	return newSnapFromFile(context.Background(), cfg.fs, *cfg)
}
// NewFromRemote creates a snap source by downloading the requested snap from
// the snapcraft store before scanning it.
func NewFromRemote(cfg Config) (source.Source, error) {
	expanded, err := homedir.Expand(cfg.Request)
	if err != nil {
		return nil, fmt.Errorf("unable to expand path %q: %w", cfg.Request, err)
	}
	cfg.Request = filepath.Clean(expanded)

	if cfg.fs == nil {
		cfg.fs = afero.NewOsFs()
	}

	getter := intFile.NewGetter(cfg.ID, cleanhttp.DefaultClient())
	snap, err := getRemoteSnapFile(context.Background(), cfg.fs, getter, cfg)
	if err != nil {
		return nil, err
	}
	return newFromPath(cfg, snap)
}
// newFromPath assembles a snapSource from an already-materialized snap file
// and eagerly extracts its manifest. Note: the source is returned even when
// manifest extraction errors, alongside that error.
func newFromPath(cfg Config, f *snapFile) (source.Source, error) {
	src := &snapSource{
		id:           deriveID(cfg.Request, cfg.Alias.Name, cfg.Alias.Version, f.Digests),
		config:       cfg,
		mutex:        &sync.Mutex{},
		digests:      f.Digests,
		squashfsPath: f.Path,
		closer:       f.Cleanup,
	}
	return src, src.extractManifest()
}
// extractManifest resolves the snap's contents and caches the parsed manifest
// on the source; a nil manifest from the parser is silently tolerated.
func (s *snapSource) extractManifest() error {
	resolver, err := s.FileResolver(source.SquashedScope)
	if err != nil {
		return fmt.Errorf("unable to create snap file resolver: %w", err)
	}

	m, err := parseManifest(resolver)
	if err != nil {
		return fmt.Errorf("unable to parse snap manifest file: %w", err)
	}
	if m == nil {
		return nil
	}
	s.manifest = *m
	return nil
}
// ID returns the artifact ID derived from the snap digest, name, and version.
func (s snapSource) ID() artifact.ID {
	return s.id
}
// NameVersion returns the snap's name and version from the manifest, with any
// non-empty alias fields taking precedence.
func (s snapSource) NameVersion() (string, string) {
	name, version := s.manifest.Name, s.manifest.Version
	if s.config.Alias.IsEmpty() {
		return name, version
	}
	if n := s.config.Alias.Name; n != "" {
		name = n
	}
	if v := s.config.Alias.Version; v != "" {
		version = v
	}
	return name, version
}
// Describe reports the source identity plus snap-specific metadata captured
// from the snap manifest and the computed file digests.
func (s snapSource) Describe() source.Description {
	name, version := s.NameVersion()
	return source.Description{
		ID:      string(s.id),
		Name:    name,
		Version: version,
		Metadata: source.SnapMetadata{
			Summary:       s.manifest.Summary,
			Base:          s.manifest.Base,
			Grade:         s.manifest.Grade,
			Confinement:   s.manifest.Confinement,
			Architectures: s.manifest.Architectures,
			Digests:       s.digests,
		},
	}
}
func (s *snapSource) Close() error {
if s.squashFileCloser != nil {
if err := s.squashFileCloser(); err != nil {
return fmt.Errorf("unable to close snap resolver: %w", err)
}
s.squashFileCloser = nil
}
s.resolver = nil
if s.fs != nil {
if err := s.fs.Close(); err != nil {
return fmt.Errorf("unable to close snap squashfs: %w", err)
}
}
if s.closer != nil {
if err := s.closer(); err != nil {
return fmt.Errorf("unable to close snap source: %w", err)
}
}
return nil
}
// FileResolver lazily builds (and caches) a file resolver over the snap's
// squashfs contents. The scope argument is ignored: a snap is always resolved
// as a single squashed filesystem.
//
// The squashfs file handle is kept open for the life of the resolver (opened
// file readers stream from it) and is released via Close.
func (s *snapSource) FileResolver(_ source.Scope) (file.Resolver, error) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	// return the cached resolver if the squashfs has already been indexed
	if s.resolver != nil {
		return s.resolver, nil
	}
	log.Debugf("parsing squashfs file: %s", s.squashfsPath)
	f, err := os.Open(s.squashfsPath)
	if err != nil {
		return nil, fmt.Errorf("unable to open squashfs file: %w", err)
	}
	// record how to close the handle later (see Close)
	s.squashFileCloser = func() error {
		if err := f.Close(); err != nil {
			return fmt.Errorf("unable to close squashfs file: %w", err)
		}
		return nil
	}
	fileMeta, err := f.Stat()
	if err != nil {
		return nil, fmt.Errorf("unable to stat squashfs file: %w", err)
	}
	size := fileMeta.Size()
	fileCatalog := image.NewFileCatalog()
	prog := bus.StartIndexingFiles(filepath.Base(s.squashfsPath))
	b := diskFile.New(f, true) // NOTE(review): second arg presumably marks the backend read-only — confirm against go-diskfs docs
	fs, err := squashfs.Read(b, fileMeta.Size(), 0, 0)
	if err != nil {
		err := fmt.Errorf("unable to open squashfs file: %w", err)
		prog.SetError(err)
		return nil, err
	}
	// walk the entire squashfs, populating the file tree and catalog index
	tree := filetree.New()
	if err := intFile.WalkDiskDir(fs, "/", squashfsVisitor(tree, fileCatalog, &size, prog)); err != nil {
		err := fmt.Errorf("failed to walk squashfs file=%q: %w", s.squashfsPath, err)
		prog.SetError(err)
		return nil, err
	}
	prog.SetCompleted()
	s.resolver = &fileresolver.FiletreeResolver{
		Chroot:        fileresolver.ChrootContext{},
		Tree:          tree,
		Index:         fileCatalog.Index,
		SearchContext: filetree.NewSearchContext(tree, fileCatalog.Index),
		Opener: func(ref stereoFile.Reference) (io.ReadCloser, error) {
			return fileCatalog.Open(ref)
		},
	}
	s.fs = fs
	return s.resolver, nil
}
// linker is implemented by squashfs file handles that can report a symlink target.
type linker interface {
	Readlink() (string, error)
}
// squashfsVisitor returns a walk callback that registers each visited squashfs
// entry into the given file tree and catalog, accumulating the total file size
// into size (when non-nil) and advancing prog per indexed file.
func squashfsVisitor(ft filetree.Writer, fileCatalog *image.FileCatalog, size *int64, prog *monitor.TaskProgress) intFile.WalkDiskDirFunc {
	builder := filetree.NewBuilder(ft, fileCatalog.Index)
	return func(fsys filesystem.FileSystem, path string, d os.FileInfo, walkErr error) error {
		if walkErr != nil {
			log.WithFields("error", walkErr, "path", path).Trace("unable to walk squash file path")
			return walkErr
		}
		prog.AtomicStage.Set(path)
		var f filesystem.File
		var mimeType string
		var err error
		if !d.IsDir() {
			// open non-directories to sniff the MIME type; an open failure is
			// tolerated and leaves f nil (the entry is still indexed)
			f, err = fsys.OpenFile(path, os.O_RDONLY)
			if err != nil {
				log.WithFields("error", err, "path", path).Trace("unable to open squash file path")
			} else {
				defer f.Close()
				mimeType = stereoFile.MIMEType(f)
			}
		}
		var ty stereoFile.Type
		var linkPath string
		switch {
		case d.IsDir():
			// in some implementations, the mode does not indicate a directory, so we check the FileInfo type explicitly
			ty = stereoFile.TypeDirectory
		default:
			ty = stereoFile.TypeFromMode(d.Mode())
			if ty == stereoFile.TypeSymLink && f != nil {
				if l, ok := f.(linker); ok {
					// best-effort: an unreadable link target is recorded as empty
					linkPath, _ = l.Readlink()
				}
			}
		}
		metadata := stereoFile.Metadata{
			FileInfo:        d,
			Path:            path,
			LinkDestination: linkPath,
			Type:            ty,
			MIMEType:        mimeType,
		}
		fileReference, err := builder.Add(metadata)
		if err != nil {
			return err
		}
		// a nil reference means the entry was not indexed; nothing more to do
		if fileReference == nil {
			return nil
		}
		if size != nil {
			*(size) += metadata.Size()
		}
		// register a lazy opener so the resolver can stream file contents later
		fileCatalog.AssociateOpener(*fileReference, func() (io.ReadCloser, error) {
			return fsys.OpenFile(path, os.O_RDONLY)
		})
		prog.Increment()
		return nil
	}
}
// isSquashFSFile reports whether the given MIME type or file extension
// indicates a squashfs (or snap) file.
func isSquashFSFile(mimeType, path string) bool {
	switch mimeType {
	case "application/vnd.squashfs", "application/x-squashfs":
		return true
	}
	switch filepath.Ext(path) {
	case ".snap", ".squashfs":
		return true
	}
	return false
}
// deriveID builds a stable artifact ID from the snap's xxh64 content digest
// (preferred, taken from the precomputed digests) plus the resolved name and
// version; when no xxh64 digest is present, one is computed from the file
// contents (or the path itself as a last resort).
func deriveID(path, name, version string, digests []file.Digest) artifact.ID {
	var contentDigest string
	for _, d := range digests {
		if strings.ToLower(strings.ReplaceAll(d.Algorithm, "-", "")) == "xxh64" {
			contentDigest = d.Value
			break
		}
	}
	if contentDigest == "" {
		contentDigest = digestOfFileContents(path)
	}
	identity := fmt.Sprintf("%s:%s@%s", contentDigest, name, version)
	return internal.ArtifactIDFromDigest(digest.SHA256.FromString(identity).String())
}
// digestOfFileContents returns the xxhash64 of the file contents, or the
// xxhash64 of the path itself if the file cannot be read (so callers always
// get a stable, non-empty value).
func digestOfFileContents(path string) string {
	if f, err := os.Open(path); err == nil {
		defer f.Close()
		return digestOfReader(f)
	}
	return digestOfReader(strings.NewReader(path))
}
// digestOfReader returns the hex-encoded xxhash64 of everything read from r.
// Copy errors are deliberately ignored: the hash of whatever was consumed is
// returned (best-effort, used only for ID derivation).
func digestOfReader(r io.Reader) string {
	hasher := xxhash.New64()
	_, _ = io.Copy(hasher, r)
	return fmt.Sprintf("%x", hasher.Sum(nil))
}

View File

@ -0,0 +1,54 @@
package snapsource
import (
"context"
"crypto"
"github.com/anchore/syft/syft/source"
)
// snapSourceProvider adapts snap configuration to the generic source.Provider
// interface, covering both local snap files and remote (store-fetched) snaps.
type snapSourceProvider struct {
	local            bool // when true, path is an existing snap file on disk
	path             string
	exclude          source.ExcludeConfig
	digestAlgorithms []crypto.Hash
	alias            source.Alias
}
// NewLocalSourceProvider creates a new provider for snap files from a local path.
func NewLocalSourceProvider(path string, exclude source.ExcludeConfig, digestAlgorithms []crypto.Hash, alias source.Alias) source.Provider {
	p := snapSourceProvider{
		local:            true,
		path:             path,
		exclude:          exclude,
		digestAlgorithms: digestAlgorithms,
		alias:            alias,
	}
	return &p
}
// NewRemoteSourceProvider creates a new provider for snap files from a remote location.
func NewRemoteSourceProvider(path string, exclude source.ExcludeConfig, digestAlgorithms []crypto.Hash, alias source.Alias) source.Provider {
	p := snapSourceProvider{
		path:             path,
		exclude:          exclude,
		digestAlgorithms: digestAlgorithms,
		alias:            alias,
	}
	return &p
}
// Name returns the constant provider name used to select this source type.
func (p snapSourceProvider) Name() string {
	return "snap"
}
// Provide constructs a snap source from the configured request, downloading
// the snap first when the provider targets a remote snap.
func (p snapSourceProvider) Provide(_ context.Context) (source.Source, error) {
	cfg := Config{
		Request:          p.path,
		Exclude:          p.exclude,
		DigestAlgorithms: p.digestAlgorithms,
		Alias:            p.alias,
	}
	if !p.local {
		return NewFromRemote(cfg)
	}
	return NewFromLocal(cfg)
}

View File

@ -0,0 +1,86 @@
package snapsource
import (
"crypto"
"fmt"
"testing"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/anchore/go-homedir"
"github.com/anchore/stereoscope/pkg/image"
)
// TestNewFromLocal exercises getLocalSnapFile: path normalization (including
// "~" home-dir expansion), existence checks against an in-memory filesystem,
// and rejection of a platform/architecture for local snap files.
func TestNewFromLocal(t *testing.T) {
	tests := []struct {
		name        string
		cfg         Config
		setup       func(fs afero.Fs)
		wantRequest string
		wantErr     assert.ErrorAssertionFunc
	}{
		{
			name: "local file exists",
			cfg: Config{
				Request:          "/test/local.snap",
				DigestAlgorithms: []crypto.Hash{crypto.SHA256},
			},
			setup: func(fs afero.Fs) {
				require.NoError(t, createMockSquashfsFile(fs, "/test/local.snap"))
			},
			wantRequest: "/test/local.snap",
		},
		{
			name: "resolve home dir exists",
			cfg: Config{
				Request:          "~/test/local.snap",
				DigestAlgorithms: []crypto.Hash{crypto.SHA256},
			},
			wantErr: assert.Error,
			// the request should be rewritten to the expanded home path even
			// though the (nonexistent) file causes an error
			wantRequest: func() string {
				homeDir, err := homedir.Expand("~/test/local.snap")
				require.NoError(t, err, "failed to expand home directory")
				require.NotContains(t, homeDir, "~")
				return homeDir
			}(),
		},
		{
			name: "local file with architecture specified",
			cfg: Config{
				Request: "/test/local.snap",
				Platform: &image.Platform{
					Architecture: "arm64",
				},
			},
			setup: func(fs afero.Fs) {
				require.NoError(t, createMockSquashfsFile(fs, "/test/local.snap"))
			},
			wantErr: func(t assert.TestingT, err error, msgAndArgs ...interface{}) bool {
				return assert.Error(t, err, msgAndArgs...) && assert.Contains(t, err.Error(), "architecture cannot be specified for local snap files", msgAndArgs...)
			},
			wantRequest: "/test/local.snap",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.wantErr == nil {
				tt.wantErr = assert.NoError
			}
			tt.cfg.fs = afero.NewMemMapFs() // Use an in-memory filesystem for testing
			if tt.setup != nil {
				tt.setup(tt.cfg.fs)
			}
			got, err := getLocalSnapFile(&tt.cfg)
			tt.wantErr(t, err, fmt.Sprintf("NewFromLocal(%v)", tt.cfg))
			assert.Equal(t, tt.wantRequest, tt.cfg.Request, "expected request path to match")
			if err != nil {
				require.Nil(t, got, "expected nil source on error")
				return
			}
			require.NotNil(t, got, "expected non-nil source on success")
		})
	}
}

View File

@ -0,0 +1,612 @@
package snapsource
import (
"context"
"crypto"
"fmt"
"os"
"path/filepath"
"testing"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/wagoodman/go-progress"
"github.com/anchore/stereoscope/pkg/image"
"github.com/anchore/syft/internal/file"
)
// TestSnapIdentity_String verifies the human-readable rendering of a snap
// identity across combinations of name, channel, and architecture.
func TestSnapIdentity_String(t *testing.T) {
	tests := []struct {
		name     string
		identity snapIdentity
		expected string
	}{
		{
			name: "name only",
			identity: snapIdentity{
				Name: "etcd",
			},
			expected: "etcd",
		},
		{
			name: "name with channel",
			identity: snapIdentity{
				Name:    "etcd",
				Channel: "stable",
			},
			expected: "etcd@stable",
		},
		{
			name: "name with architecture",
			identity: snapIdentity{
				Name:         "etcd",
				Architecture: "amd64",
			},
			expected: "etcd (amd64)",
		},
		{
			name: "name with channel and architecture",
			identity: snapIdentity{
				Name:         "etcd",
				Channel:      "beta",
				Architecture: "arm64",
			},
			expected: "etcd@beta (arm64)",
		},
		{
			name: "empty channel with architecture",
			identity: snapIdentity{
				Name:         "mysql",
				Channel:      "",
				Architecture: "amd64",
			},
			expected: "mysql (amd64)",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := tt.identity.String()
			assert.Equal(t, tt.expected, result)
		})
	}
}
// TestFileExists verifies fileExists against regular files, directories, and
// missing paths on an in-memory filesystem (directories do not count).
func TestFileExists(t *testing.T) {
	fs := afero.NewMemMapFs()
	tests := []struct {
		name     string
		setup    func() string
		expected bool
	}{
		{
			name: "file exists",
			setup: func() string {
				path := "/test/file.snap"
				require.NoError(t, createMockSquashfsFile(fs, path))
				return path
			},
			expected: true,
		},
		{
			name: "file does not exist",
			setup: func() string {
				return "/nonexistent/file.snap"
			},
			expected: false,
		},
		{
			name: "path is directory",
			setup: func() string {
				path := "/test/dir"
				require.NoError(t, fs.MkdirAll(path, 0755))
				return path
			},
			expected: false,
		},
		{
			name: "file exists in subdirectory",
			setup: func() string {
				path := "/deep/nested/path/file.snap"
				require.NoError(t, createMockSquashfsFile(fs, path))
				return path
			},
			expected: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			path := tt.setup()
			result := fileExists(fs, path)
			assert.Equal(t, tt.expected, result)
		})
	}
}
// TestNewSnapFromFile verifies newSnapFromFile on an in-memory filesystem:
// success on a valid squashfs, and failures for a specified architecture,
// a missing path, and a directory path.
func TestNewSnapFromFile(t *testing.T) {
	ctx := context.Background()
	fs := afero.NewMemMapFs()
	tests := []struct {
		name        string
		cfg         Config
		setup       func() string
		expectError bool
		errorMsg    string
	}{
		{
			name: "valid local snap file",
			cfg: Config{
				DigestAlgorithms: []crypto.Hash{crypto.SHA256},
			},
			setup: func() string {
				path := "/test/valid.snap"
				require.NoError(t, createMockSquashfsFile(fs, path))
				return path
			},
			expectError: false,
		},
		{
			name: "architecture specified for local file",
			cfg: Config{
				Platform: &image.Platform{
					Architecture: "arm64",
				},
			},
			setup: func() string {
				path := "/test/valid.snap"
				require.NoError(t, createMockSquashfsFile(fs, path))
				return path
			},
			expectError: true,
			errorMsg:    "architecture cannot be specified for local snap files",
		},
		{
			name: "file does not exist",
			cfg:  Config{},
			setup: func() string {
				return "/nonexistent/file.snap"
			},
			expectError: true,
			errorMsg:    "unable to stat path",
		},
		{
			name: "path is directory",
			cfg:  Config{},
			setup: func() string {
				path := "/test/directory"
				require.NoError(t, fs.MkdirAll(path, 0755))
				return path
			},
			expectError: true,
			errorMsg:    "given path is a directory",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			path := tt.setup()
			tt.cfg.Request = path
			result, err := newSnapFromFile(ctx, fs, tt.cfg)
			if tt.expectError {
				assert.Error(t, err)
				if tt.errorMsg != "" {
					assert.Contains(t, err.Error(), tt.errorMsg)
				}
				assert.Nil(t, result)
			} else {
				assert.NoError(t, err)
				assert.NotNil(t, result)
				assert.Equal(t, path, result.Path)
				assert.NotEmpty(t, result.MimeType)
				assert.NotEmpty(t, result.Digests)
				assert.Nil(t, result.Cleanup) // Local files don't have cleanup
			}
		})
	}
}
// TestNewSnapFileFromRemote verifies remote snap acquisition through a mocked
// downloader: a successful download (including that Cleanup removes the
// downloaded file), a download with no digest algorithms requested, and a
// failed download. The real OS filesystem is used because the download path
// materializes a temp file.
func TestNewSnapFileFromRemote(t *testing.T) {
	ctx := context.Background()
	tests := []struct {
		name        string
		cfg         Config
		info        *remoteSnap
		setupMock   func(*mockFileGetter, afero.Fs)
		expectError bool
		errorMsg    string
		validate    func(t *testing.T, result *snapFile, fs afero.Fs)
	}{
		{
			name: "successful remote snap download",
			cfg: Config{
				DigestAlgorithms: []crypto.Hash{crypto.SHA256},
			},
			info: &remoteSnap{
				snapIdentity: snapIdentity{
					Name:         "etcd",
					Channel:      "stable",
					Architecture: "amd64",
				},
				URL: "https://api.snapcraft.io/download/etcd_123.snap",
			},
			setupMock: func(mockGetter *mockFileGetter, fs afero.Fs) {
				mockGetter.On("GetFile", mock.MatchedBy(func(dst string) bool {
					// expect destination to end with etcd_123.snap
					return filepath.Base(dst) == "etcd_123.snap"
				}), "https://api.snapcraft.io/download/etcd_123.snap", mock.Anything).Run(func(args mock.Arguments) {
					// simulate successful download by creating the file
					dst := args.String(0)
					require.NoError(t, createMockSquashfsFile(fs, dst))
				}).Return(nil)
			},
			expectError: false,
			validate: func(t *testing.T, result *snapFile, fs afero.Fs) {
				assert.NotNil(t, result)
				assert.Contains(t, result.Path, "etcd_123.snap")
				assert.NotEmpty(t, result.MimeType)
				assert.NotEmpty(t, result.Digests)
				assert.NotNil(t, result.Cleanup)
				_, err := fs.Stat(result.Path)
				assert.NoError(t, err)
				// cleanup must remove the downloaded file
				err = result.Cleanup()
				require.NoError(t, err)
				_, err = fs.Stat(result.Path)
				assert.True(t, os.IsNotExist(err))
			},
		},
		{
			name: "successful download with no digest algorithms",
			cfg: Config{
				DigestAlgorithms: []crypto.Hash{}, // no digests requested
			},
			info: &remoteSnap{
				snapIdentity: snapIdentity{
					Name:         "mysql",
					Channel:      "8.0/stable",
					Architecture: "arm64",
				},
				URL: "https://api.snapcraft.io/download/mysql_456.snap",
			},
			setupMock: func(mockGetter *mockFileGetter, fs afero.Fs) {
				mockGetter.On("GetFile", mock.MatchedBy(func(dst string) bool {
					return filepath.Base(dst) == "mysql_456.snap"
				}), "https://api.snapcraft.io/download/mysql_456.snap", mock.Anything).Run(func(args mock.Arguments) {
					dst := args.String(0)
					require.NoError(t, createMockSquashfsFile(fs, dst))
				}).Return(nil)
			},
			expectError: false,
			validate: func(t *testing.T, result *snapFile, fs afero.Fs) {
				assert.NotNil(t, result)
				assert.Contains(t, result.Path, "mysql_456.snap")
				assert.NotEmpty(t, result.MimeType)
				assert.Empty(t, result.Digests) // no digests requested
				assert.NotNil(t, result.Cleanup)
			},
		},
		{
			name: "download fails",
			cfg: Config{
				DigestAlgorithms: []crypto.Hash{crypto.SHA256},
			},
			info: &remoteSnap{
				snapIdentity: snapIdentity{
					Name:         "failing-snap",
					Channel:      "stable",
					Architecture: "amd64",
				},
				URL: "https://api.snapcraft.io/download/failing_snap.snap",
			},
			setupMock: func(mockGetter *mockFileGetter, fs afero.Fs) {
				mockGetter.On("GetFile", mock.AnythingOfType("string"), "https://api.snapcraft.io/download/failing_snap.snap", mock.Anything).Return(fmt.Errorf("network timeout"))
			},
			expectError: true,
			errorMsg:    "failed to download snap file",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fs := afero.NewOsFs()
			mockGetter := &mockFileGetter{}
			if tt.setupMock != nil {
				tt.setupMock(mockGetter, fs)
			}
			result, err := newSnapFileFromRemote(ctx, fs, tt.cfg, mockGetter, tt.info)
			if tt.expectError {
				require.Error(t, err)
				if tt.errorMsg != "" {
					assert.Contains(t, err.Error(), tt.errorMsg)
				}
				assert.Nil(t, result)
			} else {
				require.NoError(t, err)
				if tt.validate != nil {
					tt.validate(t, result, fs)
				}
			}
			mockGetter.AssertExpectations(t)
		})
	}
}
// TestGetSnapFileInfo verifies MIME-type detection and digest computation for
// snap files, plus error handling for missing paths, directories, and files
// that lack the squashfs magic.
func TestGetSnapFileInfo(t *testing.T) {
	ctx := context.Background()
	fs := afero.NewMemMapFs()
	tests := []struct {
		name        string
		setup       func() string
		hashes      []crypto.Hash
		expectError bool
		errorMsg    string
	}{
		{
			name: "valid squashfs file with hashes",
			setup: func() string {
				path := "/test/valid.snap"
				require.NoError(t, createMockSquashfsFile(fs, path))
				return path
			},
			hashes:      []crypto.Hash{crypto.SHA256, crypto.MD5},
			expectError: false,
		},
		{
			name: "valid squashfs file without hashes",
			setup: func() string {
				path := "/test/valid.snap"
				require.NoError(t, createMockSquashfsFile(fs, path))
				return path
			},
			hashes:      []crypto.Hash{},
			expectError: false,
		},
		{
			name: "file does not exist",
			setup: func() string {
				return "/nonexistent/file.snap"
			},
			expectError: true,
			errorMsg:    "unable to stat path",
		},
		{
			name: "path is directory",
			setup: func() string {
				path := "/test/directory"
				require.NoError(t, fs.MkdirAll(path, 0755))
				return path
			},
			expectError: true,
			errorMsg:    "given path is a directory",
		},
		{
			name: "invalid file format",
			setup: func() string {
				path := "/test/invalid.txt"
				require.NoError(t, fs.MkdirAll(filepath.Dir(path), 0755))
				file, err := fs.Create(path)
				require.NoError(t, err)
				defer file.Close()
				_, err = file.Write([]byte("not a squashfs file"))
				require.NoError(t, err)
				return path
			},
			expectError: true,
			errorMsg:    "not a valid squashfs/snap file",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			path := tt.setup()
			mimeType, digests, err := getSnapFileInfo(ctx, fs, path, tt.hashes)
			if tt.expectError {
				assert.Error(t, err)
				if tt.errorMsg != "" {
					assert.Contains(t, err.Error(), tt.errorMsg)
				}
			} else {
				assert.NoError(t, err)
				assert.NotEmpty(t, mimeType)
				// one digest per requested hash algorithm
				if len(tt.hashes) > 0 {
					assert.Len(t, digests, len(tt.hashes))
				} else {
					assert.Empty(t, digests)
				}
			}
		})
	}
}
// TestDownloadSnap verifies that downloadSnap delegates to the getter with the
// expected destination/URL and wraps download failures; the shared mock's
// expectations are reset between cases.
func TestDownloadSnap(t *testing.T) {
	mockGetter := &mockFileGetter{}
	tests := []struct {
		name        string
		info        *remoteSnap
		dest        string
		setupMock   func()
		expectError bool
		errorMsg    string
	}{
		{
			name: "successful download",
			info: &remoteSnap{
				snapIdentity: snapIdentity{
					Name:         "etcd",
					Channel:      "stable",
					Architecture: "amd64",
				},
				URL: "https://example.com/etcd.snap",
			},
			dest: "/tmp/etcd.snap",
			setupMock: func() {
				mockGetter.On("GetFile", "/tmp/etcd.snap", "https://example.com/etcd.snap", mock.AnythingOfType("[]*progress.Manual")).Return(nil)
			},
			expectError: false,
		},
		{
			name: "download fails",
			info: &remoteSnap{
				snapIdentity: snapIdentity{
					Name:         "etcd",
					Channel:      "stable",
					Architecture: "amd64",
				},
				URL: "https://example.com/etcd.snap",
			},
			dest: "/tmp/etcd.snap",
			setupMock: func() {
				mockGetter.On("GetFile", "/tmp/etcd.snap", "https://example.com/etcd.snap", mock.AnythingOfType("[]*progress.Manual")).Return(fmt.Errorf("network error"))
			},
			expectError: true,
			errorMsg:    "failed to download snap file",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// reset mock for each test
			mockGetter.ExpectedCalls = nil
			if tt.setupMock != nil {
				tt.setupMock()
			}
			err := downloadSnap(mockGetter, tt.info, tt.dest)
			if tt.expectError {
				assert.Error(t, err)
				if tt.errorMsg != "" {
					assert.Contains(t, err.Error(), tt.errorMsg)
				}
			} else {
				assert.NoError(t, err)
			}
			mockGetter.AssertExpectations(t)
		})
	}
}
// TestParseSnapRequest verifies name/channel splitting of snap requests: the
// first "@" is the delimiter and a missing or empty channel falls back to
// "stable".
func TestParseSnapRequest(t *testing.T) {
	tests := []struct {
		name            string
		request         string
		expectedName    string
		expectedChannel string
	}{
		{
			name:            "snap name only - uses default channel",
			request:         "etcd",
			expectedName:    "etcd",
			expectedChannel: "stable",
		},
		{
			name:            "snap with beta channel",
			request:         "etcd@beta",
			expectedName:    "etcd",
			expectedChannel: "beta",
		},
		{
			name:            "snap with edge channel",
			request:         "etcd@edge",
			expectedName:    "etcd",
			expectedChannel: "edge",
		},
		{
			name:            "snap with version track",
			request:         "etcd@2.3/stable",
			expectedName:    "etcd",
			expectedChannel: "2.3/stable",
		},
		{
			name:            "snap with complex channel path",
			request:         "mysql@8.0/candidate",
			expectedName:    "mysql",
			expectedChannel: "8.0/candidate",
		},
		{
			name:            "snap with multiple @ symbols - only first is delimiter",
			request:         "app@beta@test",
			expectedName:    "app",
			expectedChannel: "beta@test",
		},
		{
			name:            "empty snap name with channel",
			request:         "@stable",
			expectedName:    "",
			expectedChannel: "stable",
		},
		{
			name:            "snap name with empty channel - uses default",
			request:         "etcd@",
			expectedName:    "etcd",
			expectedChannel: "stable",
		},
		{
			name:            "hyphenated snap name",
			request:         "hello-world@stable",
			expectedName:    "hello-world",
			expectedChannel: "stable",
		},
		{
			name:            "snap name with numbers",
			request:         "app123",
			expectedName:    "app123",
			expectedChannel: "stable",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			name, channel := parseSnapRequest(tt.request)
			assert.Equal(t, tt.expectedName, name)
			assert.Equal(t, tt.expectedChannel, channel)
		})
	}
}
// mockFileGetter is a testify-based stand-in for the download getter; the
// embedded file.Getter keeps unmocked methods satisfying the interface.
type mockFileGetter struct {
	mock.Mock
	file.Getter
}
// GetFile records the call and returns the configured error, simulating a
// download without any network access.
func (m *mockFileGetter) GetFile(dst, src string, monitor ...*progress.Manual) error {
	args := m.Called(dst, src, monitor)
	return args.Error(0)
}
// createMockSquashfsFile writes a minimal file at path whose first four bytes
// are the squashfs magic ("hsqs") — enough for magic-based detection in these
// tests, though not a complete squashfs image.
func createMockSquashfsFile(fs afero.Fs, path string) error {
	if err := fs.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}
	// "file" as a local name would shadow the imported internal/file package;
	// use f instead.
	f, err := fs.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	// write squashfs magic header
	_, err = f.Write([]byte("hsqs"))
	return err
}

View File

@ -0,0 +1,159 @@
package snapsource
import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/anchore/syft/internal/log"
)
const (
	// defaultChannel is used when a snap request does not specify a channel.
	defaultChannel = "stable"
	// defaultArchitecture is used when no platform/architecture is requested.
	defaultArchitecture = "amd64"
	// defaultSeries is sent as the Snap-Device-Series header on API requests.
	defaultSeries = "16"
)
// snapcraftClient handles interactions with the Snapcraft API
type snapcraftClient struct {
	InfoAPIURL string // snap info endpoint (tests point this at a local server)
	FindAPIURL string // snap find/search endpoint
	HTTPClient *http.Client
}
// newSnapcraftClient creates a new Snapcraft API client with default settings.
func newSnapcraftClient() *snapcraftClient {
	return &snapcraftClient{
		InfoAPIURL: "https://api.snapcraft.io/v2/snaps/info/",
		FindAPIURL: "https://api.snapcraft.io/v2/snaps/find",
		// a client without a timeout can hang forever on a stalled store
		// connection; these endpoints serve small JSON payloads, so a generous
		// bound is safe (snap downloads go through a separate getter).
		HTTPClient: &http.Client{Timeout: 30 * time.Second},
	}
}
// snapcraftInfo represents the response from the snapcraft info API
type snapcraftInfo struct {
	// ChannelMap lists each published (channel, architecture) combination
	// together with its download information.
	ChannelMap []snapChannelMapEntry `json:"channel-map"`
}

// snapChannelMapEntry is a single entry of the info API channel map.
type snapChannelMapEntry struct {
	Channel  snapChannel  `json:"channel"`
	Download snapDownload `json:"download"`
}

// snapChannel identifies a channel for a specific architecture.
type snapChannel struct {
	Architecture string `json:"architecture"`
	Name         string `json:"name"`
}

// snapDownload carries the direct download URL for a snap revision.
type snapDownload struct {
	URL string `json:"url"`
}

// snapFindResponse represents the response from the snapcraft find API (search v2)
type snapFindResponse struct {
	Results []struct {
		Name   string   `json:"name"`
		SnapID string   `json:"snap-id"`
		Snap   struct{} `json:"snap"`
	} `json:"results"`
}
// GetSnapDownloadURL retrieves the download URL for a snap package matching
// the identity's name, channel, and architecture.
//
// On a 404 from the info API it falls back to the find API to distinguish
// "snap exists but is unavailable for download" (e.g. region-locked) from
// "no such snap", producing a more specific error for each case.
func (c *snapcraftClient) GetSnapDownloadURL(id snapIdentity) (string, error) {
	apiURL := c.InfoAPIURL + id.Name
	log.WithFields("name", id.Name, "channel", id.Channel, "architecture", id.Architecture).Trace("requesting snap info")
	req, err := http.NewRequest(http.MethodGet, apiURL, nil)
	if err != nil {
		return "", fmt.Errorf("failed to create HTTP request: %w", err)
	}
	// required by the snapcraft v2 API (see defaultSeries)
	req.Header.Set("Snap-Device-Series", defaultSeries)
	resp, err := c.HTTPClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("failed to send HTTP request: %w", err)
	}
	defer resp.Body.Close()
	// handle 404 case - check if snap exists via find API
	if resp.StatusCode == http.StatusNotFound {
		log.WithFields("name", id.Name).Debug("snap info not found, checking if snap exists via find API")
		exists, snapID, findErr := c.CheckSnapExists(id.Name)
		if findErr != nil {
			return "", fmt.Errorf("failed to check if snap exists: %w", findErr)
		}
		if exists {
			return "", fmt.Errorf("found snap '%s' (id=%s) but it is unavailable for download", id.Name, snapID)
		}
		return "", fmt.Errorf("no snap found with name '%s'", id.Name)
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("API request failed with status code %d", resp.StatusCode)
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response body: %w", err)
	}
	var info snapcraftInfo
	if err := json.Unmarshal(body, &info); err != nil {
		return "", fmt.Errorf("failed to parse JSON response: %w", err)
	}
	// select the channel-map entry matching both architecture and channel name
	for _, cm := range info.ChannelMap {
		if cm.Channel.Architecture == id.Architecture && cm.Channel.Name == id.Channel {
			return cm.Download.URL, nil
		}
	}
	return "", fmt.Errorf("no matching snap found for %s", id.String())
}
// CheckSnapExists uses the find API (search v2) to check if a snap exists,
// returning whether an exact name match was found along with its snap-id.
func (c *snapcraftClient) CheckSnapExists(snapName string) (bool, string, error) {
	req, err := http.NewRequest(http.MethodGet, c.FindAPIURL, nil)
	if err != nil {
		return false, "", fmt.Errorf("failed to create find request: %w", err)
	}

	query := req.URL.Query()
	query.Add("name-startswith", snapName)
	req.URL.RawQuery = query.Encode()
	req.Header.Set("Snap-Device-Series", defaultSeries)

	resp, err := c.HTTPClient.Do(req)
	if err != nil {
		return false, "", fmt.Errorf("failed to send find request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return false, "", fmt.Errorf("find API request failed with status code %d", resp.StatusCode)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return false, "", fmt.Errorf("failed to read find response body: %w", err)
	}

	var findResp snapFindResponse
	if err := json.Unmarshal(body, &findResp); err != nil {
		return false, "", fmt.Errorf("failed to parse find JSON response: %w", err)
	}

	// the API filters by prefix, so only an exact name match counts
	for _, result := range findResp.Results {
		if result.Name == snapName {
			return true, result.SnapID, nil
		}
	}
	return false, "", nil
}

View File

@ -0,0 +1,383 @@
package snapsource
import (
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestSnapcraftClient_CheckSnapExists verifies exact-name matching against the
// find API using a local httptest server (the API filters by name-startswith,
// so supersets must be narrowed to an exact match), plus non-200 error
// handling.
func TestSnapcraftClient_CheckSnapExists(t *testing.T) {
	tests := []struct {
		name           string
		snapName       string
		mockResponse   snapFindResponse
		statusCode     int
		expectedExists bool
		expectedSnapID string
		expectError    require.ErrorAssertionFunc
		errorContains  string
	}{
		{
			name:       "snap exists",
			snapName:   "jp-ledger",
			statusCode: http.StatusOK,
			mockResponse: snapFindResponse{
				Results: []struct {
					Name   string   `json:"name"`
					SnapID string   `json:"snap-id"`
					Snap   struct{} `json:"snap"`
				}{
					{
						Name:   "jp-ledger",
						SnapID: "jyDlMmifyQhSWGPM9fnKc1HSD7E6c47e",
						Snap:   struct{}{},
					},
				},
			},
			expectedExists: true,
			expectedSnapID: "jyDlMmifyQhSWGPM9fnKc1HSD7E6c47e",
			expectError:    require.NoError,
		},
		{
			name:       "snap does not exist",
			snapName:   "nonexistent-snap",
			statusCode: http.StatusOK,
			mockResponse: snapFindResponse{
				Results: []struct {
					Name   string   `json:"name"`
					SnapID string   `json:"snap-id"`
					Snap   struct{} `json:"snap"`
				}{},
			},
			expectedExists: false,
			expectedSnapID: "",
			expectError:    require.NoError,
		},
		{
			name:       "multiple results - exact match found",
			snapName:   "test-snap",
			statusCode: http.StatusOK,
			mockResponse: snapFindResponse{
				Results: []struct {
					Name   string   `json:"name"`
					SnapID string   `json:"snap-id"`
					Snap   struct{} `json:"snap"`
				}{
					{
						Name:   "test-snap-extra",
						SnapID: "wrong-id",
						Snap:   struct{}{},
					},
					{
						Name:   "test-snap",
						SnapID: "correct-id",
						Snap:   struct{}{},
					},
				},
			},
			expectedExists: true,
			expectedSnapID: "correct-id",
			expectError:    require.NoError,
		},
		{
			name:          "find API returns 404",
			snapName:      "test",
			statusCode:    http.StatusNotFound,
			expectError:   require.Error,
			errorContains: "find API request failed with status code 404",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			findServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				// the client must send the device series and the prefix query
				assert.Equal(t, defaultSeries, r.Header.Get("Snap-Device-Series"))
				assert.Equal(t, tt.snapName, r.URL.Query().Get("name-startswith"))
				w.WriteHeader(tt.statusCode)
				if tt.statusCode == http.StatusOK {
					responseBytes, err := json.Marshal(tt.mockResponse)
					require.NoError(t, err)
					w.Write(responseBytes)
				}
			}))
			defer findServer.Close()
			client := &snapcraftClient{
				FindAPIURL: findServer.URL,
				HTTPClient: &http.Client{},
			}
			exists, snapID, err := client.CheckSnapExists(tt.snapName)
			tt.expectError(t, err)
			if err != nil && tt.errorContains != "" {
				assert.Contains(t, err.Error(), tt.errorContains)
				return
			}
			assert.Equal(t, tt.expectedExists, exists)
			assert.Equal(t, tt.expectedSnapID, snapID)
		})
	}
}
// TestSnapcraftClient_GetSnapDownloadURL verifies download-URL resolution
// against local httptest servers: happy path, 404 fallback through the find
// API (distinguishing unavailable vs nonexistent snaps), architecture
// selection, missing architecture, server errors, and find-API failures.
func TestSnapcraftClient_GetSnapDownloadURL(t *testing.T) {
	tests := []struct {
		name           string
		snapID         snapIdentity
		infoResponse   snapcraftInfo
		infoStatusCode int
		findResponse   *snapFindResponse
		findStatusCode int
		expectedURL    string
		expectError    require.ErrorAssertionFunc
		errorContains  string
	}{
		{
			name: "successful download URL retrieval",
			snapID: snapIdentity{
				Name:         "etcd",
				Channel:      "stable",
				Architecture: "amd64",
			},
			infoStatusCode: http.StatusOK,
			infoResponse: snapcraftInfo{
				ChannelMap: []snapChannelMapEntry{
					{
						Channel: snapChannel{
							Architecture: "amd64",
							Name:         "stable",
						},
						Download: snapDownload{
							URL: "https://api.snapcraft.io/api/v1/snaps/download/etcd_123.snap",
						},
					},
				},
			},
			expectedURL: "https://api.snapcraft.io/api/v1/snaps/download/etcd_123.snap",
			expectError: require.NoError,
		},
		{
			name: "region-locked snap - exists but unavailable",
			snapID: snapIdentity{
				Name:         "jp-ledger",
				Channel:      "stable",
				Architecture: "amd64",
			},
			infoStatusCode: http.StatusNotFound,
			findStatusCode: http.StatusOK,
			findResponse: &snapFindResponse{
				Results: []struct {
					Name   string   `json:"name"`
					SnapID string   `json:"snap-id"`
					Snap   struct{} `json:"snap"`
				}{
					{
						Name:   "jp-ledger",
						SnapID: "jyDlMmifyQhSWGPM9fnKc1HSD7E6c47e",
						Snap:   struct{}{},
					},
				},
			},
			expectError:   require.Error,
			errorContains: "found snap 'jp-ledger' (id=jyDlMmifyQhSWGPM9fnKc1HSD7E6c47e) but it is unavailable for download",
		},
		{
			name: "snap truly does not exist",
			snapID: snapIdentity{
				Name:         "nonexistent",
				Channel:      "stable",
				Architecture: "amd64",
			},
			infoStatusCode: http.StatusNotFound,
			findStatusCode: http.StatusOK,
			findResponse: &snapFindResponse{
				Results: []struct {
					Name   string   `json:"name"`
					SnapID string   `json:"snap-id"`
					Snap   struct{} `json:"snap"`
				}{},
			},
			expectError:   require.Error,
			errorContains: "no snap found with name 'nonexistent'",
		},
		{
			name: "multiple architectures - find correct one",
			snapID: snapIdentity{
				Name:         "mysql",
				Channel:      "stable",
				Architecture: "arm64",
			},
			infoStatusCode: http.StatusOK,
			infoResponse: snapcraftInfo{
				ChannelMap: []snapChannelMapEntry{
					{
						Channel: snapChannel{
							Architecture: "amd64",
							Name:         "stable",
						},
						Download: snapDownload{
							URL: "https://api.snapcraft.io/api/v1/snaps/download/mysql_amd64.snap",
						},
					},
					{
						Channel: snapChannel{
							Architecture: "arm64",
							Name:         "stable",
						},
						Download: snapDownload{
							URL: "https://api.snapcraft.io/api/v1/snaps/download/mysql_arm64.snap",
						},
					},
				},
			},
			expectedURL: "https://api.snapcraft.io/api/v1/snaps/download/mysql_arm64.snap",
			expectError: require.NoError,
		},
		{
			name: "snap not found - no matching architecture",
			snapID: snapIdentity{
				Name:         "etcd",
				Channel:      "stable",
				Architecture: "s390x",
			},
			infoStatusCode: http.StatusOK,
			infoResponse: snapcraftInfo{
				ChannelMap: []snapChannelMapEntry{
					{
						Channel: snapChannel{
							Architecture: "amd64",
							Name:         "stable",
						},
						Download: snapDownload{
							URL: "https://api.snapcraft.io/api/v1/snaps/download/etcd_123.snap",
						},
					},
				},
			},
			expectError:   require.Error,
			errorContains: "no matching snap found",
		},
		{
			name: "API returns 500",
			snapID: snapIdentity{
				Name:         "etcd",
				Channel:      "stable",
				Architecture: "amd64",
			},
			infoStatusCode: http.StatusInternalServerError,
			expectError:    require.Error,
			errorContains:  "API request failed with status code 500",
		},
		{
			name: "find API fails when checking 404",
			snapID: snapIdentity{
				Name:         "test-snap",
				Channel:      "stable",
				Architecture: "amd64",
			},
			infoStatusCode: http.StatusNotFound,
			findStatusCode: http.StatusInternalServerError,
			expectError:    require.Error,
			errorContains:  "failed to check if snap exists",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.expectError == nil {
				tt.expectError = require.NoError
			}
			infoServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				// the info endpoint receives the snap name as the path
				assert.Equal(t, defaultSeries, r.Header.Get("Snap-Device-Series"))
				expectedPath := "/" + tt.snapID.Name
				assert.Equal(t, expectedPath, r.URL.Path)
				w.WriteHeader(tt.infoStatusCode)
				if tt.infoStatusCode == http.StatusOK {
					responseBytes, err := json.Marshal(tt.infoResponse)
					require.NoError(t, err)
					w.Write(responseBytes)
				}
			}))
			defer infoServer.Close()
			// only stand up a find server when the case exercises the 404 fallback
			var findServer *httptest.Server
			if tt.findResponse != nil || tt.findStatusCode != 0 {
				findServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
					assert.Equal(t, defaultSeries, r.Header.Get("Snap-Device-Series"))
					assert.Equal(t, tt.snapID.Name, r.URL.Query().Get("name-startswith"))
					statusCode := tt.findStatusCode
					if statusCode == 0 {
						statusCode = http.StatusOK
					}
					w.WriteHeader(statusCode)
					if tt.findResponse != nil && statusCode == http.StatusOK {
						responseBytes, err := json.Marshal(tt.findResponse)
						require.NoError(t, err)
						w.Write(responseBytes)
					}
				}))
				defer findServer.Close()
			}
			client := &snapcraftClient{
				InfoAPIURL: infoServer.URL + "/",
				HTTPClient: &http.Client{},
			}
			if findServer != nil {
				client.FindAPIURL = findServer.URL
			}
			url, err := client.GetSnapDownloadURL(tt.snapID)
			tt.expectError(t, err)
			if err != nil {
				if tt.errorContains != "" {
					assert.Contains(t, err.Error(), tt.errorContains)
				}
				return
			}
			assert.Equal(t, tt.expectedURL, url)
		})
	}
}
// TestSnapcraftClient_GetSnapDownloadURL_InvalidJSON verifies that
// GetSnapDownloadURL surfaces a JSON parse error when the info API responds
// with HTTP 200 but a malformed body.
func TestSnapcraftClient_GetSnapDownloadURL_InvalidJSON(t *testing.T) {
	// Stub info endpoint that always returns a non-JSON payload.
	infoServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("invalid json")) // best-effort write in a test fixture
	}))
	defer infoServer.Close()

	client := &snapcraftClient{
		InfoAPIURL: infoServer.URL + "/",
		HTTPClient: &http.Client{},
	}

	snapID := snapIdentity{
		Name:         "etcd",
		Channel:      "stable",
		Architecture: "amd64",
	}

	_, err := client.GetSnapDownloadURL(snapID)
	// require (not assert): err.Error() below would panic on a nil error if
	// the assertion failed but execution continued.
	require.Error(t, err)
	assert.Contains(t, err.Error(), "failed to parse JSON response")
}
// TestNewSnapcraftClient checks that a freshly constructed client is wired
// with the production snapcraft API endpoints and a usable HTTP client.
func TestNewSnapcraftClient(t *testing.T) {
	c := newSnapcraftClient()

	assert.NotNil(t, c.HTTPClient)
	assert.Equal(t, "https://api.snapcraft.io/v2/snaps/info/", c.InfoAPIURL)
	assert.Equal(t, "https://api.snapcraft.io/v2/snaps/find", c.FindAPIURL)
}

View File

@ -7,6 +7,7 @@ import (
"github.com/anchore/syft/syft/source"
"github.com/anchore/syft/syft/source/directorysource"
"github.com/anchore/syft/syft/source/filesource"
"github.com/anchore/syft/syft/source/snapsource"
"github.com/anchore/syft/syft/source/stereoscopesource"
)
@ -14,6 +15,7 @@ const (
FileTag = stereoscope.FileTag
DirTag = stereoscope.DirTag
PullTag = stereoscope.PullTag
SnapTag = "snap"
)
// All returns all the configured source providers known to syft
@ -24,13 +26,25 @@ func All(userInput string, cfg *Config) []collections.TaggedValue[source.Provide
stereoscopeProviders := stereoscopeSourceProviders(userInput, cfg)
return collections.TaggedValueSet[source.Provider]{}.
// 1. try all specific, local sources first...
// --from file, dir, oci-archive, etc.
Join(stereoscopeProviders.Select(FileTag, DirTag)...).
// --from snap (local only)
Join(tagProvider(snapsource.NewLocalSourceProvider(userInput, cfg.Exclude, cfg.DigestAlgorithms, cfg.Alias), SnapTag)).
// 2. try unspecific, local sources after other local sources last...
Join(tagProvider(filesource.NewSourceProvider(userInput, cfg.Exclude, cfg.DigestAlgorithms, cfg.Alias), FileTag)).
Join(tagProvider(directorysource.NewSourceProvider(userInput, cfg.Exclude, cfg.Alias, cfg.BasePath), DirTag)).
// 3. try remote sources after everything else...
// --from docker, registry, etc.
Join(stereoscopeProviders.Select(PullTag)...)
Join(stereoscopeProviders.Select(PullTag)...).
// --from snap (remote only)
Join(tagProvider(snapsource.NewRemoteSourceProvider(userInput, cfg.Exclude, cfg.DigestAlgorithms, cfg.Alias), SnapTag))
}
func stereoscopeSourceProviders(userInput string, cfg *Config) collections.TaggedValueSet[source.Provider] {