Add deep-squashed scope to annotate all layers where a package exists (#3138)

* add squash all layers resolver

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* add squash with all layers logic

Signed-off-by: tomersein <tomersein@gmail.com>

* add squash with all layers logic

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squashed all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squash with all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* squash with all layers

Signed-off-by: tomersein <tomersein@gmail.com>

* adjust resolver to strictly return squash paths only

Signed-off-by: Alex Goodman <wagoodman@users.noreply.github.com>

* show all packages have locations + primary evidence

Signed-off-by: Alex Goodman <wagoodman@users.noreply.github.com>

* fix race condition in test

Signed-off-by: Alex Goodman <wagoodman@users.noreply.github.com>

* consider access paths

Signed-off-by: Alex Goodman <wagoodman@users.noreply.github.com>

---------

Signed-off-by: tomersein <tomersein@gmail.com>
Signed-off-by: Alex Goodman <wagoodman@users.noreply.github.com>
Co-authored-by: Alex Goodman <wagoodman@users.noreply.github.com>
This commit is contained in:
GGMU 2025-05-05 21:35:57 +03:00 committed by GitHub
parent e13c9e7813
commit 6db60c5975
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 1558 additions and 78 deletions

View File

@ -12,6 +12,7 @@ import (
"github.com/anchore/stereoscope/pkg/imagetest" "github.com/anchore/stereoscope/pkg/imagetest"
"github.com/anchore/syft/syft" "github.com/anchore/syft/syft"
"github.com/anchore/syft/syft/pkg" "github.com/anchore/syft/syft/pkg"
"github.com/anchore/syft/syft/sbom"
"github.com/anchore/syft/syft/source" "github.com/anchore/syft/syft/source"
) )
@ -93,40 +94,7 @@ func TestPkgCoverageImage(t *testing.T) {
for _, c := range cases { for _, c := range cases {
t.Run(c.name, func(t *testing.T) { t.Run(c.name, func(t *testing.T) {
pkgCount := 0 assertPackages(t, sbom, c, observedLanguages, observedPkgs)
for a := range sbom.Artifacts.Packages.Enumerate(c.pkgType) {
if a.Language.String() != "" {
observedLanguages.Add(a.Language.String())
}
observedPkgs.Add(string(a.Type))
expectedVersion, ok := c.pkgInfo[a.Name]
if !ok {
t.Errorf("unexpected package found: %s", a.Name)
}
if expectedVersion != a.Version {
t.Errorf("unexpected package version (pkg=%s): %s, expected: %s", a.Name, a.Version, expectedVersion)
}
if a.Language != c.pkgLanguage {
t.Errorf("bad language (pkg=%+v): %+v", a.Name, a.Language)
}
if a.Type != c.pkgType {
t.Errorf("bad package type (pkg=%+v): %+v", a.Name, a.Type)
}
pkgCount++
}
if pkgCount != len(c.pkgInfo)+c.duplicates {
t.Logf("Discovered packages of type %+v", c.pkgType)
for a := range sbom.Artifacts.Packages.Enumerate(c.pkgType) {
t.Log(" ", a)
}
t.Fatalf("unexpected package count: %d!=%d", pkgCount, len(c.pkgInfo))
}
}) })
} }
@ -176,6 +144,37 @@ func TestPkgCoverageDirectory(t *testing.T) {
for _, test := range cases { for _, test := range cases {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
assertPackages(t, sbom, test, observedLanguages, observedPkgs)
})
}
observedLanguages.Remove(pkg.UnknownLanguage.String())
definedLanguages.Remove(pkg.UnknownLanguage.String())
definedLanguages.Remove(pkg.R.String())
observedPkgs.Remove(string(pkg.UnknownPkg))
definedPkgs.Remove(string(pkg.BinaryPkg))
definedPkgs.Remove(string(pkg.BitnamiPkg))
definedPkgs.Remove(string(pkg.GraalVMNativeImagePkg))
definedPkgs.Remove(string(pkg.LinuxKernelPkg))
definedPkgs.Remove(string(pkg.LinuxKernelModulePkg))
definedPkgs.Remove(string(pkg.Rpkg))
definedPkgs.Remove(string(pkg.UnknownPkg))
definedPkgs.Remove(string(pkg.PhpPeclPkg)) // this is covered as pear packages
// for directory scans we should not expect to see any of the following package types
definedPkgs.Remove(string(pkg.KbPkg))
// ensure that integration test commonTestCases stay in sync with the available catalogers
if observedLanguages.Size() < definedLanguages.Size() {
t.Errorf("language coverage incomplete (languages=%d, coverage=%d)", definedLanguages.Size(), observedLanguages.Size())
}
if observedPkgs.Size() < definedPkgs.Size() {
t.Errorf("package coverage incomplete (packages=%d, coverage=%d)", definedPkgs.Size(), observedPkgs.Size())
}
}
func assertPackages(t *testing.T, sbom sbom.SBOM, test testCase, observedLanguages *strset.Set, observedPkgs *strset.Set) {
actualPkgCount := 0 actualPkgCount := 0
for actualPkg := range sbom.Artifacts.Packages.Enumerate(test.pkgType) { for actualPkg := range sbom.Artifacts.Packages.Enumerate(test.pkgType) {
@ -206,6 +205,20 @@ func TestPkgCoverageDirectory(t *testing.T) {
t.Errorf("bad package type (pkg=%+v): %+v", actualPkg.Name, actualPkg.Type) t.Errorf("bad package type (pkg=%+v): %+v", actualPkg.Name, actualPkg.Type)
} }
actualPkgCount++ actualPkgCount++
// all packages should have at least one location associated with it, and of those locations at least one should be primary evidence
locs := actualPkg.Locations.ToSlice()
assert.NotEmpty(t, locs, "package %q has no locations (type=%q)", actualPkg.Name, actualPkg.Type)
var primaryEvidenceFound bool
for _, l := range locs {
if _, exists := l.Annotations[pkg.EvidenceAnnotationKey]; !exists {
t.Errorf("missing evidence annotation (pkg=%s type=%s)", actualPkg.Name, actualPkg.Type)
}
if l.Annotations[pkg.EvidenceAnnotationKey] == pkg.PrimaryEvidenceAnnotation {
primaryEvidenceFound = true
}
}
assert.True(t, primaryEvidenceFound, "no primary evidence found for package %q", actualPkg.Name)
} }
if actualPkgCount != len(test.pkgInfo)+test.duplicates { if actualPkgCount != len(test.pkgInfo)+test.duplicates {
@ -214,33 +227,6 @@ func TestPkgCoverageDirectory(t *testing.T) {
} }
t.Fatalf("unexpected package count: %d!=%d", actualPkgCount, len(test.pkgInfo)) t.Fatalf("unexpected package count: %d!=%d", actualPkgCount, len(test.pkgInfo))
} }
})
}
observedLanguages.Remove(pkg.UnknownLanguage.String())
definedLanguages.Remove(pkg.UnknownLanguage.String())
definedLanguages.Remove(pkg.R.String())
observedPkgs.Remove(string(pkg.UnknownPkg))
definedPkgs.Remove(string(pkg.BinaryPkg))
definedPkgs.Remove(string(pkg.BitnamiPkg))
definedPkgs.Remove(string(pkg.GraalVMNativeImagePkg))
definedPkgs.Remove(string(pkg.LinuxKernelPkg))
definedPkgs.Remove(string(pkg.LinuxKernelModulePkg))
definedPkgs.Remove(string(pkg.Rpkg))
definedPkgs.Remove(string(pkg.UnknownPkg))
definedPkgs.Remove(string(pkg.PhpPeclPkg)) // this is covered as pear packages
// for directory scans we should not expect to see any of the following package types
definedPkgs.Remove(string(pkg.KbPkg))
// ensure that integration test commonTestCases stay in sync with the available catalogers
if observedLanguages.Size() < definedLanguages.Size() {
t.Errorf("language coverage incomplete (languages=%d, coverage=%d)", definedLanguages.Size(), observedLanguages.Size())
}
if observedPkgs.Size() < definedPkgs.Size() {
t.Errorf("package coverage incomplete (packages=%d, coverage=%d)", definedPkgs.Size(), observedPkgs.Size())
}
} }
func TestPkgCoverageImage_HasEvidence(t *testing.T) { func TestPkgCoverageImage_HasEvidence(t *testing.T) {

View File

@ -0,0 +1,63 @@
package task
import (
"context"
"fmt"
"github.com/anchore/syft/internal/sbomsync"
"github.com/anchore/syft/syft/artifact"
"github.com/anchore/syft/syft/file"
"github.com/anchore/syft/syft/pkg"
"github.com/anchore/syft/syft/sbom"
)
// NewDeepSquashedScopeCleanupTask returns a task that prunes the catalog down to only those
// packages that are still represented in the final (squashed) state of the image.
func NewDeepSquashedScopeCleanupTask() Task {
	return NewTask("deep-squashed-cleaner", func(_ context.Context, _ file.Resolver, builder sbomsync.Builder) error {
		// the builder is expected to also implement sbomsync.Accessor so the in-progress SBOM can be read
		accessor := builder.(sbomsync.Accessor)
		// remove all packages that do not exist in the final state of the image
		builder.DeletePackages(packagesToRemove(accessor)...)
		return nil
	})
}
// packagesToRemove returns the IDs of all packages that have no supporting evidence in the final
// (squashed) state of the image: no visible location that is primary evidence (or a binary package),
// and no not-yet-seen hidden location carrying primary evidence.
//
// Note: the original implementation ended several switch cases with a bare `break`; in Go that only
// exits the switch (not the location loop), so those breaks were no-ops and have been removed. The
// one `break` that acted as a duplicate-guard is expressed as a condition instead. Behavior is
// unchanged.
func packagesToRemove(accessor sbomsync.Accessor) []artifact.ID {
	pkgsToDelete := make([]artifact.ID, 0)
	accessor.ReadFromSBOM(func(s *sbom.SBOM) {
		// tracks package+location combinations already counted as primary evidence so that the same
		// evidence surfaced via multiple layers is only considered once
		filterDuplicates := make(map[string]bool)
		for p := range s.Artifacts.Packages.Enumerate() {
			var hasSquashedEvidence, hasHiddenPrimaryEvidence bool
			for _, l := range p.Locations.ToSlice() {
				isPrimaryEvidence := l.Annotations[pkg.EvidenceAnnotationKey] == pkg.PrimaryEvidenceAnnotation
				switch l.Annotations[file.VisibleAnnotationKey] {
				case file.VisibleAnnotation:
					// the location is visible in the squashed tree; binary packages are kept even
					// without a primary-evidence annotation
					if isPrimaryEvidence || p.Type == pkg.BinaryPkg {
						hasSquashedEvidence = true
					}
				case "":
					// no visibility annotation: count primary evidence only the first time this
					// package+location combination is seen
					if isPrimaryEvidence && !filterDuplicates[getKey(p, l)] {
						filterDuplicates[getKey(p, l)] = true
						hasHiddenPrimaryEvidence = true
					}
				}
			}
			if !hasSquashedEvidence && !hasHiddenPrimaryEvidence {
				pkgsToDelete = append(pkgsToDelete, p.ID())
			}
		}
	})
	return pkgsToDelete
}
// getKey derives a deduplication key for a package occurrence at a specific location.
// The parameter is named p (rather than the original pkg) to avoid shadowing the imported pkg package.
func getKey(p pkg.Package, loc file.Location) string {
	return fmt.Sprintf("%s-%s-%s-%s", p.Name, p.Version, loc.RealPath, loc.AccessPath)
}

View File

@ -188,6 +188,7 @@ func (c *CreateSBOMConfig) makeTaskGroups(src source.Description) ([][]task.Task
// generate package and file tasks based on the configuration // generate package and file tasks based on the configuration
environmentTasks := c.environmentTasks() environmentTasks := c.environmentTasks()
scopeTasks := c.scopeTasks()
relationshipsTasks := c.relationshipTasks(src) relationshipsTasks := c.relationshipTasks(src)
unknownTasks := c.unknownsTasks() unknownTasks := c.unknownsTasks()
@ -204,6 +205,11 @@ func (c *CreateSBOMConfig) makeTaskGroups(src source.Description) ([][]task.Task
taskGroups = append(taskGroups, append(pkgTasks, fileTasks...)) taskGroups = append(taskGroups, append(pkgTasks, fileTasks...))
} }
// all scope work must be done after all nodes (files and packages) have been cataloged and before the relationship
if len(scopeTasks) > 0 {
taskGroups = append(taskGroups, scopeTasks)
}
// all relationship work must be done after all nodes (files and packages) have been cataloged // all relationship work must be done after all nodes (files and packages) have been cataloged
if len(relationshipsTasks) > 0 { if len(relationshipsTasks) > 0 {
taskGroups = append(taskGroups, relationshipsTasks) taskGroups = append(taskGroups, relationshipsTasks)
@ -391,6 +397,17 @@ func (c *CreateSBOMConfig) userPackageTasks(cfg task.CatalogingFactoryConfig) ([
return persistentPackageTasks, selectablePackageTasks, nil return persistentPackageTasks, selectablePackageTasks, nil
} }
// scopeTasks returns the set of tasks that should be run to generate additional scope information
func (c *CreateSBOMConfig) scopeTasks() []task.Task {
var tsks []task.Task
if c.Search.Scope == source.DeepSquashedScope {
if t := task.NewDeepSquashedScopeCleanupTask(); t != nil {
tsks = append(tsks, t)
}
}
return tsks
}
// relationshipTasks returns the set of tasks that should be run to generate additional relationships as well as // relationshipTasks returns the set of tasks that should be run to generate additional relationships as well as
// prune existing relationships. // prune existing relationships.
func (c *CreateSBOMConfig) relationshipTasks(src source.Description) []task.Task { func (c *CreateSBOMConfig) relationshipTasks(src source.Description) []task.Task {

View File

@ -9,6 +9,17 @@ import (
"github.com/anchore/stereoscope/pkg/image" "github.com/anchore/stereoscope/pkg/image"
) )
const (
// VisibleAnnotationKey is the key used to indicate if the location is visible or not at runtime
VisibleAnnotationKey = "visible"
// HiddenAnnotation is the value used to indicate that the location is not visible at runtime because it was deleted
HiddenAnnotation = "false"
// VisibleAnnotation is the value used to indicate that the location is visible at runtime
VisibleAnnotation = "true"
)
// Location represents a path relative to a particular filesystem resolved to a specific file.Reference. This struct is used as a key // Location represents a path relative to a particular filesystem resolved to a specific file.Reference. This struct is used as a key
// in content fetching to uniquely identify a file relative to a request (the AccessPath). // in content fetching to uniquely identify a file relative to a request (the AccessPath).
type Location struct { type Location struct {
@ -48,6 +59,9 @@ func (m *LocationMetadata) merge(other LocationMetadata) error {
} }
func (l Location) WithAnnotation(key, value string) Location { func (l Location) WithAnnotation(key, value string) Location {
if key == "" || value == "" {
return l
}
if l.Annotations == nil { if l.Annotations == nil {
l.Annotations = map[string]string{} l.Annotations = map[string]string{}
} }

View File

@ -0,0 +1,252 @@
package fileresolver
import (
"context"
"io"
"github.com/anchore/stereoscope/pkg/image"
"github.com/anchore/syft/syft/file"
)
// compile-time check that ContainerImageDeepSquash satisfies the file.Resolver interface
var _ file.Resolver = (*ContainerImageDeepSquash)(nil)

// ContainerImageDeepSquash implements path and content access for the paths in the squashed tree, but with additional
// depth from all layers. The goal of this is to allow for producing results where the first layer in which the material
// was added can be annotated in the SBOM (as opposed to the last [visible] layer for the path like with the squashed
// file resolver).
type ContainerImageDeepSquash struct {
	// squashed answers queries relative to the final (squashed) filesystem representation
	squashed file.Resolver
	// allLayers answers queries against every individual layer of the image
	allLayers file.Resolver
}
// NewFromContainerImageDeepSquash returns a new resolver from the perspective of all image layers
// for the given image, pairing a squashed-tree resolver (which decides visibility) with an
// all-layers resolver (which supplies the per-layer depth for those same paths).
func NewFromContainerImageDeepSquash(img *image.Image) (*ContainerImageDeepSquash, error) {
	sq, err := NewFromContainerImageSquash(img)
	if err != nil {
		return nil, err
	}
	all, err := NewFromContainerImageAllLayers(img)
	if err != nil {
		return nil, err
	}
	return &ContainerImageDeepSquash{squashed: sq, allLayers: all}, nil
}
// HasPath indicates if the given path exists in the underlying source.
func (i *ContainerImageDeepSquash) HasPath(path string) bool {
	// only the squashed tree is consulted: path-based answers should always be relative to the final
	// (squashed) filesystem state, so a path that only ever existed in a lower layer is intentionally
	// reported as absent (this differs from what the all-layers resolver would report)
	return i.squashed.HasPath(path)
}
// FilesByPath returns all file.References that match the given paths from any layer in the image,
// restricted to paths that are present in the squashed tree.
func (i *ContainerImageDeepSquash) FilesByPath(paths ...string) ([]file.Location, error) {
	fromSquashed, err := i.squashed.FilesByPath(paths...)
	if err != nil {
		return nil, err
	}
	if len(fromSquashed) == 0 {
		// only paths present in the squashed tree are raised up with all-layers depth; with no
		// squashed results there is nothing to raise up (and no reason to query all layers)
		return nil, nil
	}
	fromAllLayers, err := i.allLayers.FilesByPath(paths...)
	if err != nil {
		return nil, err
	}
	return i.mergeLocations(fromSquashed, fromAllLayers), nil
}
// FilesByGlob returns all file.References that match the given path glob pattern from any layer in
// the image, restricted to paths that are present in the squashed tree.
func (i *ContainerImageDeepSquash) FilesByGlob(patterns ...string) ([]file.Location, error) {
	fromSquashed, err := i.squashed.FilesByGlob(patterns...)
	if err != nil {
		return nil, err
	}
	if len(fromSquashed) == 0 {
		// only paths present in the squashed tree are raised up with all-layers depth; with no
		// squashed results there is nothing to raise up (and no reason to query all layers)
		return nil, nil
	}
	fromAllLayers, err := i.allLayers.FilesByGlob(patterns...)
	if err != nil {
		return nil, err
	}
	return i.mergeLocations(fromSquashed, fromAllLayers), nil
}
// RelativeFileByPath fetches a single file at the given path relative to the layer squash of the
// given reference. This is helpful when attempting to find a file that is in the same layer or
// lower as another file. Paths not present in the squashed tree yield nil.
func (i *ContainerImageDeepSquash) RelativeFileByPath(location file.Location, path string) *file.Location {
	if !i.squashed.HasPath(path) {
		// deep-squash only reports paths that survive in the final filesystem state
		return nil
	}
	if found := i.squashed.RelativeFileByPath(location, path); found != nil {
		annotated := found.WithAnnotation(file.VisibleAnnotationKey, file.VisibleAnnotation)
		return &annotated
	}
	// the path exists in the squashed tree but was not reachable from the given reference there;
	// fall back to the all-layers view and mark the result as not visible at runtime
	if found := i.allLayers.RelativeFileByPath(location, path); found != nil {
		annotated := found.WithAnnotation(file.VisibleAnnotationKey, file.HiddenAnnotation)
		return &annotated
	}
	return nil
}
// FileContentsByLocation fetches file contents for a single file reference.
// If the path does not exist an error is returned.
func (i *ContainerImageDeepSquash) FileContentsByLocation(location file.Location) (io.ReadCloser, error) {
	// regardless of the layer or scope, if the user gives us a specific path+layer location, then we should always
	// return the contents for that specific location (thus the all-layers resolver must always be used, since it can
	// address content on any layer, not just the final visible one)
	return i.allLayers.FileContentsByLocation(location)
}
// FilesByMIMEType returns all file.References matching the given MIME types from any layer in the
// image, restricted to paths that are present in the squashed tree.
func (i *ContainerImageDeepSquash) FilesByMIMEType(types ...string) ([]file.Location, error) {
	fromSquashed, err := i.squashed.FilesByMIMEType(types...)
	if err != nil {
		return nil, err
	}
	if len(fromSquashed) == 0 {
		// only paths present in the squashed tree are raised up with all-layers depth; with no
		// squashed results there is nothing to raise up (and no reason to query all layers)
		return nil, nil
	}
	fromAllLayers, err := i.allLayers.FilesByMIMEType(types...)
	if err != nil {
		return nil, err
	}
	return i.mergeLocations(fromSquashed, fromAllLayers), nil
}
// AllLocations streams every location from the squashed tree (annotated as visible) merged with
// lower-layer occurrences of those same paths (annotated as hidden); see mergeLocationStreams for
// the exact filtering and deduplication rules.
func (i *ContainerImageDeepSquash) AllLocations(ctx context.Context) <-chan file.Location {
	return i.mergeLocationStreams(ctx, i.squashed.AllLocations(ctx), i.allLayers.AllLocations(ctx))
}
// FileMetadataByLocation fetches metadata for a single file reference.
func (i *ContainerImageDeepSquash) FileMetadataByLocation(location file.Location) (file.Metadata, error) {
	// regardless of the layer or scope, if the user gives us a specific path+layer location, then we should always
	// return the metadata for that specific location (thus the all-layers resolver must always be used, since it can
	// address any layer, not just the final visible one)
	return i.allLayers.FileMetadataByLocation(location)
}
// mergeLocations combines squashed-tree results (annotated as visible) with all-layers results
// (annotated as hidden), keeping only all-layers entries whose real path and access path both exist
// in the squashed tree and which are not already covered by a squashed result. Squashed results
// always come first, preserving the order of the underlying filetree queries.
func (i *ContainerImageDeepSquash) mergeLocations(squashedLocations, allLayersLocations []file.Location) []file.Location {
	if len(squashedLocations) == 0 {
		// only paths present in the squashed tree are raised up; no squashed results means nothing
		// to raise up
		return nil
	}
	// a location set is used purely for deduplication; the returned slice keeps query order
	seen := file.NewLocationSet()
	var merged []file.Location
	for _, loc := range squashedLocations {
		merged = append(merged, loc.WithAnnotation(file.VisibleAnnotationKey, file.VisibleAnnotation))
		seen.Add(loc)
	}
	for _, loc := range allLayersLocations {
		switch {
		case seen.Contains(loc):
			// this path + layer is already represented by a squashed-tree result
		case !i.squashed.HasPath(loc.RealPath):
			// the path matched the query (e.g. **/node_modules) but is not in the squashed tree
		case loc.AccessPath != "" && !i.squashed.HasPath(loc.AccessPath):
			// not only should the real path exist, the way we got there (e.g. a symlink such as
			// /etc/passwd -> /etc/passwd-1) must also exist in the squashed tree
		default:
			merged = append(merged, loc.WithAnnotation(file.VisibleAnnotationKey, file.HiddenAnnotation))
		}
	}
	return merged
}
// mergeLocationStreams is the streaming counterpart of mergeLocations: squashed results are
// forwarded first (annotated as visible), then all-layers results for paths present in the squashed
// tree (annotated as hidden). Both input channels are always fully drained — even after
// cancellation — so the upstream producers do not leak goroutines.
func (i *ContainerImageDeepSquash) mergeLocationStreams(ctx context.Context, squashedLocations, allLayersLocations <-chan file.Location) <-chan file.Location {
	result := make(chan file.Location)
	go func() {
		defer close(result)
		// we are using a location set to deduplicate locations, but we don't use it for the returned
		// results in order to preserve the order of the locations from the underlying filetree query
		squashedCoords := file.NewLocationSet()
		var isDone bool
		for l := range squashedLocations {
			if isDone {
				// ctx was canceled: bleed off the rest of the squashed stream so its producer does not leak
				continue
			}
			select {
			case <-ctx.Done():
				isDone = true
			default:
				// NOTE(review): this unbuffered send blocks if the consumer stops reading without
				// canceling ctx; cancellation is only observed between sends — confirm consumers
				// always cancel ctx when abandoning the stream
				result <- l.WithAnnotation(file.VisibleAnnotationKey, file.VisibleAnnotation)
				squashedCoords.Add(l)
			}
		}
		for l := range allLayersLocations {
			if isDone {
				// ctx was canceled: bleed off the rest of the all-layers stream so its producer does not leak
				continue
			}
			if squashedCoords.Empty() {
				// this is meant to return all files in all layers only for paths that are present in the squashed tree.
				// If there are no results from the squashed tree, then there are no paths to raise up.
				// That being said, we need to bleed off the rest of the results from the allLayersLocations stream
				// and not leak a goroutine.
				continue
			}
			if squashedCoords.Contains(l) {
				// we've already seen this location from the squashed stream, skip it
				continue
			}
			if !i.squashed.HasPath(l.RealPath) {
				// if we find a location for a path that matches the query (e.g. **/node_modules) but is not present in the squashed tree, skip it
				continue
			}
			// not only should the real path to the file exist, but the way we took to get there should also exist
			// (e.g. if we are looking for /etc/passwd, but the real path is /etc/passwd -> /etc/passwd-1, then we should
			// make certain that /etc/passwd-1 exists)
			if l.AccessPath != "" && !i.squashed.HasPath(l.AccessPath) {
				continue
			}
			select {
			case <-ctx.Done():
				isDone = true
			default:
				result <- l.WithAnnotation(file.VisibleAnnotationKey, file.HiddenAnnotation)
			}
		}
	}()
	return result
}

File diff suppressed because it is too large Load Diff

View File

@ -1542,8 +1542,6 @@ func Test_fileResolver_FileContentsByLocation(t *testing.T) {
} }
func TestFileResolver_AllLocations_errorOnDirRequest(t *testing.T) { func TestFileResolver_AllLocations_errorOnDirRequest(t *testing.T) {
defer goleak.VerifyNone(t)
filePath := "./test-fixtures/system_paths/target/home/place" filePath := "./test-fixtures/system_paths/target/home/place"
parentPath, err := absoluteSymlinkFreePathToParent(filePath) parentPath, err := absoluteSymlinkFreePathToParent(filePath)
require.NoError(t, err) require.NoError(t, err)
@ -1557,9 +1555,8 @@ func TestFileResolver_AllLocations_errorOnDirRequest(t *testing.T) {
for loc := range resolver.AllLocations(ctx) { for loc := range resolver.AllLocations(ctx) {
entry, err := resolver.index.Get(loc.Reference()) entry, err := resolver.index.Get(loc.Reference())
require.NoError(t, err) require.NoError(t, err)
if entry.Metadata.IsDir() { if dirLoc == nil && entry.Metadata.IsDir() {
dirLoc = &loc dirLoc = &loc
break
} }
} }
@ -1568,6 +1565,8 @@ func TestFileResolver_AllLocations_errorOnDirRequest(t *testing.T) {
reader, err := resolver.FileContentsByLocation(*dirLoc) reader, err := resolver.FileContentsByLocation(*dirLoc)
require.Error(t, err) require.Error(t, err)
require.Nil(t, reader) require.Nil(t, reader)
goleak.VerifyNone(t)
} }
func TestFileResolver_AllLocations(t *testing.T) { func TestFileResolver_AllLocations(t *testing.T) {
@ -1592,10 +1591,11 @@ func TestFileResolver_AllLocations(t *testing.T) {
sort.Strings(pathsList) sort.Strings(pathsList)
assert.ElementsMatchf(t, expected, pathsList, "expected all paths to be indexed, but found different paths: \n%s", cmp.Diff(expected, paths.List())) assert.ElementsMatchf(t, expected, pathsList, "expected all paths to be indexed, but found different paths: \n%s", cmp.Diff(expected, paths.List()))
goleak.VerifyNone(t)
} }
func Test_FileResolver_AllLocationsDoesNotLeakGoRoutine(t *testing.T) { func Test_FileResolver_AllLocationsDoesNotLeakGoRoutine(t *testing.T) {
defer goleak.VerifyNone(t)
filePath := "./test-fixtures/system_paths/target/home/place" filePath := "./test-fixtures/system_paths/target/home/place"
parentPath, err := absoluteSymlinkFreePathToParent(filePath) parentPath, err := absoluteSymlinkFreePathToParent(filePath)
require.NoError(t, err) require.NoError(t, err)
@ -1609,4 +1609,6 @@ func Test_FileResolver_AllLocationsDoesNotLeakGoRoutine(t *testing.T) {
break break
} }
cancel() cancel()
goleak.VerifyNone(t)
} }

View File

@ -12,12 +12,15 @@ const (
SquashedScope Scope = "squashed" SquashedScope Scope = "squashed"
// AllLayersScope indicates to catalog content on all layers, regardless if it is visible from the container at runtime. // AllLayersScope indicates to catalog content on all layers, regardless if it is visible from the container at runtime.
AllLayersScope Scope = "all-layers" AllLayersScope Scope = "all-layers"
// DeepSquashedScope indicates to catalog content on all layers, but only include content visible from the squashed filesystem representation.
DeepSquashedScope Scope = "deep-squashed"
) )
// AllScopes is a slice containing all possible scope options // AllScopes is a slice containing all possible scope options
var AllScopes = []Scope{ var AllScopes = []Scope{
SquashedScope, SquashedScope,
AllLayersScope, AllLayersScope,
DeepSquashedScope,
} }
// ParseScope returns a scope as indicated from the given string. // ParseScope returns a scope as indicated from the given string.
@ -25,8 +28,10 @@ func ParseScope(userStr string) Scope {
switch strings.ToLower(userStr) { switch strings.ToLower(userStr) {
case SquashedScope.String(): case SquashedScope.String():
return SquashedScope return SquashedScope
case "alllayers", AllLayersScope.String(): case "all", "alllayers", AllLayersScope.String():
return AllLayersScope return AllLayersScope
case "deepsquashed", "squasheddeep", "squashed-deep", "deep-squash", "deepsquash", strings.ToLower(DeepSquashedScope.String()):
return DeepSquashedScope
} }
return UnknownScope return UnknownScope
} }

View File

@ -20,6 +20,11 @@ func TestParseScope(t *testing.T) {
name: "all-layers", name: "all-layers",
want: AllLayersScope, want: AllLayersScope,
}, },
{
name: "deep-squashed",
want: DeepSquashedScope,
},
// fall back to unknown // fall back to unknown
{ {
name: "make-believe", name: "make-believe",
@ -48,6 +53,31 @@ func TestParseScope(t *testing.T) {
name: "alLlaYerS", name: "alLlaYerS",
want: AllLayersScope, want: AllLayersScope,
}, },
// aliases
{
name: "all",
want: AllLayersScope,
},
{
name: "deep-squash",
want: DeepSquashedScope,
},
{
name: "deepsquashed",
want: DeepSquashedScope,
},
{
name: "squasheddeep",
want: DeepSquashedScope,
},
{
name: "squashed-deep",
want: DeepSquashedScope,
},
{
name: "deepsquash",
want: DeepSquashedScope,
},
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {

View File

@ -103,6 +103,8 @@ func (s stereoscopeImageSource) FileResolver(scope source.Scope) (file.Resolver,
res, err = fileresolver.NewFromContainerImageSquash(s.image) res, err = fileresolver.NewFromContainerImageSquash(s.image)
case source.AllLayersScope: case source.AllLayersScope:
res, err = fileresolver.NewFromContainerImageAllLayers(s.image) res, err = fileresolver.NewFromContainerImageAllLayers(s.image)
case source.DeepSquashedScope:
res, err = fileresolver.NewFromContainerImageDeepSquash(s.image)
default: default:
return nil, fmt.Errorf("bad image scope provided: %+v", scope) return nil, fmt.Errorf("bad image scope provided: %+v", scope)
} }