Mirror of https://github.com/anchore/syft.git (synced 2025-11-17 16:33:21 +01:00)

chore(deps): update tools to latest versions (#3144)

Parent: cff9d494df
Commit: dad253785e
@@ -26,7 +26,7 @@ tools:
   # used for linting
   - name: golangci-lint
     version:
-      want: v1.60.1
+      want: v1.60.3
     method: github-release
     with:
       repo: golangci/golangci-lint
@@ -111,7 +111,7 @@ tools:
   # used for triggering a release
   - name: gh
     version:
-      want: v2.54.0
+      want: v2.55.0
     method: github-release
     with:
       repo: cli/cli
@@ -12,10 +12,10 @@ linters:
   enable:
     - asciicheck
     - bodyclose
+    - copyloopvar
     - dogsled
     - dupl
    - errcheck
-    - exportloopref
     - funlen
     - gocognit
     - goconst
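
This linter swap tracks golangci-lint v1.60: exportloopref is deprecated now that Go 1.22 gives each loop iteration its own variable, and copyloopvar instead flags the per-iteration copies that the old semantics required (the rust-audit hunk near the end of this diff removes exactly such a "dep := dep" copy). A minimal sketch of the difference, assuming a Go 1.22 toolchain; the names are illustrative:

package main

import "fmt"

func main() {
	deps := []string{"a", "b", "c"}
	var ptrs []*string
	for _, dep := range deps {
		// Before Go 1.22 the loop variable was reused across iterations, so
		// taking its address needed a per-iteration copy ("dep := dep"), which
		// exportloopref policed. Go 1.22 allocates a fresh variable per
		// iteration, so the copy is redundant and copyloopvar flags it.
		ptrs = append(ptrs, &dep)
	}
	for _, p := range ptrs {
		fmt.Println(*p) // prints a, b, c under per-iteration semantics
	}
}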
@@ -30,6 +30,7 @@ linters:
     - ineffassign
     - misspell
     - nakedret
+    - nolintlint
     - revive
     - staticcheck
     - stylecheck
@@ -80,7 +81,6 @@ run:
   # - lll # without a way to specify per-line exception cases, this is not usable
   # - maligned # this is an excellent linter, but tricky to optimize and we are not sensitive to memory layout optimizations
   # - nestif
-  # - nolintlint # as of go1.19 this conflicts with the behavior of gofmt, which is a deal-breaker (lint-fix will still fail when running lint)
   # - prealloc # following this rule isn't consistently a good idea, as it sometimes forces unnecessary allocations that result in less idiomatic code
   # - rowserrcheck # not in a repo with sql, so this is not useful
   # - scopelint # deprecated
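
Enabling nolintlint (and dropping the stale note about its old gofmt conflict) is what drives the many "// nolint:" → "//nolint:" rewrites in the hunks below: golangci-lint only honors the machine-readable form with no space after the slashes, and nolintlint now enforces it. A small illustration with a hypothetical helper:

package demo

import "os"

func cleanup(path string) {
	// Accepted: no space after "//", optionally followed by a justification.
	os.Remove(path) //nolint:errcheck // best-effort cleanup
	// Flagged: "// nolint:errcheck" (note the space) reads as a plain comment
	// and suppresses nothing.
}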
@@ -93,14 +93,13 @@ func defaultAttestOutputOptions() options.Output {
 			string(spdxtagvalue.ID),
 		},
 		Outputs: []string{syftjson.ID.String()},
-		OutputFile: options.OutputFile{ // nolint:staticcheck
+		OutputFile: options.OutputFile{ //nolint:staticcheck
 			Enabled: false, // explicitly not allowed
 		},
 		Format: options.DefaultFormat(),
 	}
 }
 
-//nolint:funlen
 func runAttest(ctx context.Context, id clio.Identification, opts *attestOptions, userInput string) error {
 	// TODO: what other validation here besides binary name?
 	if !commandExists(cosignBinName) {
@@ -28,7 +28,6 @@ type ConvertOptions struct {
 	options.UpdateCheck `yaml:",inline" mapstructure:",squash"`
 }
 
-//nolint:dupl
 func Convert(app clio.Application) *cobra.Command {
 	id := app.ID()
 
@@ -80,7 +80,6 @@ func defaultScanOptions() *scanOptions {
 	}
 }
 
-//nolint:dupl
 func Scan(app clio.Application) *cobra.Command {
 	id := app.ID()
 
@@ -396,13 +395,13 @@ func getExplanation(expErr task.ErrInvalidExpression) string {
 
 	if errors.Is(err, task.ErrNamesNotAllowed) {
 		if expErr.Operation == task.SubSelectOperation {
-			return "However, " + err.Error() + ".\nIt seems like you are intending to add a cataloger in addition to the default set." // nolint:goconst
+			return "However, " + err.Error() + ".\nIt seems like you are intending to add a cataloger in addition to the default set."
 		}
-		return "However, " + err.Error() + "." // nolint:goconst
+		return "However, " + err.Error() + "."
 	}
 
 	if errors.Is(err, task.ErrTagsNotAllowed) {
-		return "However, " + err.Error() + ".\nAdding groups of catalogers may result in surprising behavior (create inaccurate SBOMs)." // nolint:goconst
+		return "However, " + err.Error() + ".\nAdding groups of catalogers may result in surprising behavior (create inaccurate SBOMs)."
 	}
 
 	if errors.Is(err, task.ErrAllNotAllowed) {
@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"os"
 )
 
@@ -52,9 +53,14 @@ func OpenZip(filepath string) (*ZipReadCloser, error) {
 		return nil, fmt.Errorf("unable to seek to beginning of archive: %w", err)
 	}
 
-	size := fi.Size() - int64(offset)
+	if offset > math.MaxInt64 {
+		return nil, fmt.Errorf("archive start offset too large: %v", offset)
+	}
+	offset64 := int64(offset) //nolint:gosec // lint bug, checked above: https://github.com/securego/gosec/issues/1187
 
-	r, err := zip.NewReader(io.NewSectionReader(f, int64(offset), size), size)
+	size := fi.Size() - offset64
+
+	r, err := zip.NewReader(io.NewSectionReader(f, offset64, size), size)
 	if err != nil {
 		return nil, fmt.Errorf("unable to open ZipReadCloser @ %q: %w", filepath, err)
 	}
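
This guard is the standard response to gosec's integer-conversion warning (G115): prove the uint64 fits before narrowing it, then annotate the now-safe conversion (the //nolint works around the gosec false positive tracked in securego/gosec#1187). The same checked-narrowing shape as a self-contained sketch; the helper name is hypothetical:

package main

import (
	"fmt"
	"math"
)

// checkedInt64 rejects values that cannot be represented before converting,
// so the conversion itself can never overflow.
func checkedInt64(v uint64) (int64, error) {
	if v > math.MaxInt64 {
		return 0, fmt.Errorf("value too large: %v", v)
	}
	return int64(v), nil //nolint:gosec // checked above
}

func main() {
	fmt.Println(checkedInt64(42))             // 42 <nil>
	fmt.Println(checkedInt64(math.MaxUint64)) // 0 value too large: 18446744073709551615
}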
@@ -95,8 +101,6 @@ type directoryEnd struct {
 }
 
 // note: this is derived from readDirectoryEnd within the archive/zip package
-//
-//nolint:gocognit
 func findArchiveStartOffset(r io.ReaderAt, size int64) (startOfArchive uint64, err error) {
 	// look for directoryEndSignature in the last 1k, then in the last 65k
 	var buf []byte
@@ -150,7 +154,7 @@ func findArchiveStartOffset(r io.ReaderAt, size int64) (startOfArchive uint64, err error) {
 	startOfArchive = uint64(directoryEndOffset) - d.directorySize - d.directoryOffset
 
 	// Make sure directoryOffset points to somewhere in our file.
-	if o := int64(d.directoryOffset); o < 0 || o >= size {
+	if d.directoryOffset >= uint64(size) {
 		return 0, zip.ErrFormat
 	}
 	return startOfArchive, nil
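
Rewriting the bounds check in the unsigned domain drops the int64(d.directoryOffset) conversion that gosec would flag, without losing the old "o < 0" case: a uint64 large enough to go negative as an int64 is necessarily at least as large as any valid size. A tiny sketch with a hypothetical helper (size assumed non-negative, as in the caller):

package main

import "fmt"

// offsetInFile reports whether offset points inside a file of the given size,
// comparing in uint64 so no narrowing conversion is needed.
func offsetInFile(offset uint64, size int64) bool {
	return offset < uint64(size)
}

func main() {
	fmt.Println(offsetInFile(10, 100))    // true
	fmt.Println(offsetInFile(1<<63, 100)) // false (would be negative as an int64)
}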
@@ -179,7 +183,7 @@ func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) {
 	if b.uint32() != 1 { // total number of disks
 		return -1, nil // the file is not a valid zip64-file
 	}
-	return int64(p), nil
+	return int64(p), nil //nolint:gosec
 }
 
 // readDirectory64End reads the zip64 directory end and updates the
@@ -81,8 +81,6 @@ func (f PackageTaskFactories) Tasks(cfg CatalogingFactoryConfig) ([]Task, error)
 }
 
 // NewPackageTask creates a Task function for a generic pkg.Cataloger, honoring the common configuration options.
-//
-//nolint:funlen
 func NewPackageTask(cfg CatalogingFactoryConfig, c pkg.Cataloger, tags ...string) Task {
 	fn := func(ctx context.Context, resolver file.Resolver, sbom sbomsync.Builder) error {
 		catalogerName := c.Name()
@@ -175,7 +175,7 @@ func hasElfDynTag(f *elf.File, tag elf.DynTag) bool {
 			t = elf.DynTag(f.ByteOrder.Uint32(d[0:4]))
 			d = d[8:]
 		case elf.ELFCLASS64:
-			t = elf.DynTag(f.ByteOrder.Uint64(d[0:8]))
+			t = elf.DynTag(f.ByteOrder.Uint64(d[0:8])) //nolint:gosec
 			d = d[16:]
 		}
 		if t == tag {
@@ -124,7 +124,7 @@ func decodePackageMetadata(vals map[string]string, c *cyclonedx.Component, typeN
 	if metadataType == nil {
 		return nil
 	}
-	metaPtrTyp := reflect.PtrTo(metadataType)
+	metaPtrTyp := reflect.PointerTo(metadataType)
 	metaPtr := Decode(metaPtrTyp, vals, "syft:metadata", CycloneDXFields)
 
 	// Map all explicit metadata properties
@@ -72,7 +72,6 @@ func decodeLicenses(c *cyclonedx.Component) []pkg.License {
 	return licenses
 }
 
-// nolint:funlen
 func separateLicenses(p pkg.Package) (spdx, other cyclonedx.Licenses, expressions []string) {
 	ex := make([]string, 0)
 	spdxc := cyclonedx.Licenses{}
@@ -165,7 +165,7 @@ func Decode(typ reflect.Type, values map[string]string, prefix string, fn FieldN
 
 	isSlice := false
 	if typ.Kind() == reflect.Slice {
-		typ = reflect.PtrTo(typ)
+		typ = reflect.PointerTo(typ)
 		isSlice = true
 	}
 
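
Both reflect hunks are the mechanical migration from reflect.PtrTo to reflect.PointerTo, its replacement since Go 1.18 (the old name is marked deprecated, which staticcheck reports). Behavior is identical:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	elem := reflect.TypeOf(0)
	ptr := reflect.PointerTo(elem)  // same result PtrTo used to give
	fmt.Println(ptr)                // *int
	fmt.Println(ptr.Elem() == elem) // true
}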
@@ -34,7 +34,7 @@ const (
 //
 // Available options are: <omit>, NOASSERTION, Person: <person>, Organization: <org>
 // return values are: <type>, <value>
-func Originator(p pkg.Package) (typ string, author string) { // nolint: funlen
+func Originator(p pkg.Package) (typ string, author string) { //nolint: funlen
 	if !hasMetadata(p) {
 		return typ, author
 	}
@@ -146,7 +146,7 @@ func safeFileModeConvert(val int) (fs.FileMode, error) {
 	if err != nil {
 		return 0, err
 	}
-	return os.FileMode(mode), nil
+	return os.FileMode(mode), nil //nolint:gosec
 }
 
 func toSyftLicenses(m []model.License) (p []pkg.License) {
@@ -120,7 +120,8 @@ func (r *ContainerImageAllLayers) FilesByPath(paths ...string) ([]file.Location,
 }
 
 // FilesByGlob returns all file.References that match the given path glob pattern from any layer in the image.
-// nolint:gocognit
+//
+//nolint:gocognit
 func (r *ContainerImageAllLayers) FilesByGlob(patterns ...string) ([]file.Location, error) {
 	uniqueFileIDs := stereoscopeFile.NewFileReferenceSet()
 	uniqueLocations := make([]file.Location, 0)
@@ -79,7 +79,8 @@ func (r *ContainerImageSquash) FilesByPath(paths ...string) ([]file.Location, er
 }
 
 // FilesByGlob returns all file.References that match the given path glob pattern within the squashed representation of the image.
-// nolint:gocognit
+//
+//nolint:gocognit
 func (r *ContainerImageSquash) FilesByGlob(patterns ...string) ([]file.Location, error) {
 	uniqueFileIDs := stereoscopeFile.NewFileReferenceSet()
 	uniqueLocations := make([]file.Location, 0)
@@ -34,7 +34,7 @@ type parsedData struct {
 // parseApkDB parses packages from a given APK "installed" flat-file DB. For more
 // information on specific fields, see https://wiki.alpinelinux.org/wiki/Apk_spec.
 //
-//nolint:funlen,gocognit
+//nolint:funlen
 func parseApkDB(_ context.Context, resolver file.Resolver, env *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) {
 	scanner := bufio.NewScanner(reader)
 
@@ -196,7 +196,6 @@ func matchExcluding(matcher EvidenceMatcher, contentPatternsToExclude ...string)
 	}
 }
 
-//nolint:gocognit
 func sharedLibraryLookup(sharedLibraryPattern string, sharedLibraryMatcher EvidenceMatcher) EvidenceMatcher {
 	pat := regexp.MustCompile(sharedLibraryPattern)
 	return func(classifier Classifier, context matcherContext) (packages []pkg.Package, _ error) {
@@ -157,7 +157,7 @@ func copyBinariesFromDockerImage(config config.BinaryFromImage, destination stri
 
 	defer func() {
 		cmd := exec.Command("docker", "rm", containerName)
-		cmd.Run() // nolint:errcheck
+		cmd.Run() //nolint:errcheck
 	}()
 
 	for i, destinationPath := range config.AllStorePathsForImage(image, destination) {
@@ -182,7 +182,7 @@ func copyBinaryFromContainer(containerName, containerPath, destinationPath, fing
 		return err
 	}
 
-	cmd := exec.Command("docker", "cp", fmt.Sprintf("%s:%s", containerName, containerPath), destinationPath) // nolint:gosec
+	cmd := exec.Command("docker", "cp", fmt.Sprintf("%s:%s", containerName, containerPath), destinationPath) //nolint:gosec
 	// reason for gosec exception: this is for processing test fixtures only, not used in production
 	if err := cmd.Run(); err != nil {
 		return err
@@ -169,7 +169,6 @@ func getAdditionalFileListing(resolver file.Resolver, dbLocation file.Location,
 	return files, locations
 }
 
-//nolint:dupl
 func fetchMd5Contents(resolver file.Resolver, dbLocation file.Location, m pkg.DpkgDBEntry) (io.ReadCloser, *file.Location) {
 	var md5Reader io.ReadCloser
 	var err error
@@ -213,7 +212,6 @@ func fetchMd5Contents(resolver file.Resolver, dbLocation file.Location, m pkg.Dp
 	return md5Reader, &l
 }
 
-//nolint:dupl
 func fetchConffileContents(resolver file.Resolver, dbLocation file.Location, m pkg.DpkgDBEntry) (io.ReadCloser, *file.Location) {
 	var reader io.ReadCloser
 	var err error
@@ -230,7 +230,7 @@ func handleNewKeyValue(line string) (key string, val interface{}, err error) {
 		if err != nil {
 			return "", nil, fmt.Errorf("bad installed-size value=%q: %w", val, err)
 		}
-		return key, int(s), nil
+		return key, int(s), nil //nolint:gosec
 	default:
 		return key, val, nil
 	}
@@ -264,7 +264,6 @@ func (p *CatalogTester) TestCataloger(t *testing.T, cataloger pkg.Cataloger) {
 	}
 }
 
-// nolint:funlen
 func (p *CatalogTester) assertPkgs(t *testing.T, pkgs []pkg.Package, relationships []artifact.Relationship) {
 	t.Helper()
 
@@ -268,7 +268,7 @@ func newPE(filename string, r io.ReaderAt) (nativeImage, error) {
 	}
 	exportSymbolsOffset := uint64(exportSymbolsDataDirectory.VirtualAddress)
 	exports := make([]byte, exportSymbolsDataDirectory.Size)
-	_, err = r.ReadAt(exports, int64(exportSymbolsOffset))
+	_, err = r.ReadAt(exports, int64(exportSymbolsOffset)) //nolint:gosec
 	if err != nil {
 		return fileError(filename, fmt.Errorf("could not read the exported symbols data directory: %w", err))
 	}
@@ -412,7 +412,7 @@ func (ni nativeImagePE) fetchExportAttribute(i int) (uint32, error) {
 func (ni nativeImagePE) fetchExportFunctionPointer(functionsBase uint32, i uint32) (uint32, error) {
 	var pointer uint32
 
-	n := uint32(len(ni.exports))
+	n := uint32(len(ni.exports)) //nolint:gosec
 	sz := uint32(unsafe.Sizeof(ni.t.functionPointer))
 	j := functionsBase + i*sz
 	if j+sz >= n {
@@ -457,7 +457,7 @@ func (ni nativeImagePE) fetchSbomSymbols(content *exportContentPE) {
 	sbomBytes := []byte(nativeImageSbomSymbol + "\x00")
 	sbomLengthBytes := []byte(nativeImageSbomLengthSymbol + "\x00")
 	svmVersionInfoBytes := []byte(nativeImageSbomVersionSymbol + "\x00")
-	n := uint32(len(ni.exports))
+	n := uint32(len(ni.exports)) //nolint:gosec
 
 	// Find SBOM, SBOM Length, and SVM Version Symbol
 	for i := uint32(0); i < content.numberOfNames; i++ {
@@ -342,7 +342,7 @@ func (r *mavenResolver) findPomInRemoteRepository(ctx context.Context, groupID,
 		Timeout: r.remoteRequestTimeout,
 	}
 
-	resp, err := client.Do(req) //nolint:bodyclose
+	resp, err := client.Do(req)
 	if err != nil {
 		return nil, fmt.Errorf("unable to get pom from Maven repository %v: %w", requestURL, err)
 	}
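
Here a suppression is removed rather than added: the //nolint:bodyclose on client.Do is dropped, presumably because the response body is closed on all paths later in the function, which is the one thing bodyclose checks. A minimal sketch of the shape it accepts; the fetch helper is hypothetical:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func fetch(client *http.Client, url string) ([]byte, error) {
	resp, err := client.Get(url)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close() // the close that bodyclose verifies is reachable
	return io.ReadAll(resp.Body)
}

func main() {
	body, err := fetch(http.DefaultClient, "https://example.com")
	fmt.Println(len(body), err)
}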
@@ -60,10 +60,10 @@ func readStruct(metadata any, fields ...string) string {
 	if len(fields) > 0 {
 		value, ok := metadata.(map[any]any)
 		if !ok {
-			log.Tracef("unable to read '%s' from: %v", fields[0], metadata)
+			log.Tracef("unable to read '%s' from: %v", fields[0], metadata) //nolint:gosec
 			return ""
 		}
-		return readStruct(value[fields[0]], fields[1:]...)
+		return readStruct(value[fields[0]], fields[1:]...) //nolint:gosec
 	}
 	value, ok := metadata.(string)
 	if !ok {
@@ -88,12 +88,12 @@ func mapFiles(files []rpmutils.FileInfo, digestAlgorithm string) []pkg.RpmFileRe
 		}
 		out = append(out, pkg.RpmFileRecord{
 			Path:      f.Name(),
-			Mode:      pkg.RpmFileMode(f.Mode()),
+			Mode:      pkg.RpmFileMode(f.Mode()), //nolint:gosec
 			Size:      int(f.Size()),
 			Digest:    digest,
 			UserName:  f.UserName(),
 			GroupName: f.GroupName(),
-			Flags:     rpmdb.FileFlags(f.Flags()).String(),
+			Flags:     rpmdb.FileFlags(f.Flags()).String(), //nolint:gosec
 		})
 	}
 	return out
@@ -17,7 +17,8 @@ import (
 )
 
 // parseRpmDb parses an "Packages" RPM DB and returns the Packages listed within it.
-// nolint:funlen
+//
+//nolint:funlen
 func parseRpmDB(_ context.Context, resolver file.Resolver, env *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) {
 	f, err := os.CreateTemp("", "rpmdb")
 	if err != nil {
@@ -29,7 +29,6 @@ func newPackagesFromAudit(location file.Location, versionInfo rustaudit.VersionI
 	var pkgs []pkg.Package
 
 	for _, dep := range versionInfo.Packages {
-		dep := dep
 		p := newPackageFromAudit(&dep, location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation))
 		if pkg.IsValid(&p) && dep.Kind == rustaudit.Runtime {
 			pkgs = append(pkgs, p)