Mirror of https://github.com/anchore/syft.git (synced 2025-11-17 08:23:15 +01:00)
fix: reduce warn levels to debug for non-actionable errors (#3645)
Signed-off-by: Keith Zantow <kzantow@gmail.com>
Commit: 2328b20082
Parent: 52d543f3c1
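
The pattern repeated in every hunk below is the same: log events the user cannot act on (event-parse failures, close errors, fingerprint fallbacks, malformed metadata lines) drop from warn to debug or trace, and in several spots values that were interpolated into the message move into structured WithFields arguments. A minimal standalone sketch of the idea, using the standard library's log/slog in place of syft's internal log package (the hunks themselves call that internal logger, not slog):

// Standalone sketch (not part of the commit): the same demotion expressed with log/slog.
package main

import (
    "errors"
    "log/slog"
    "os"
)

func main() {
    logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

    err := errors.New("unable to parse event")

    // before: a failure the user cannot act on was surfaced as a warning
    logger.Warn("unable to parse event", "error", err)

    // after: the same failure is recorded at debug, keeping the error as a structured field
    logger.Debug("unable to parse event", "error", err)
}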
@@ -61,7 +61,7 @@ type backgroundLineReader struct {
 func (m *Handler) handleAttestationStarted(e partybus.Event) []tea.Model {
     reader, prog, taskInfo, err := syftEventParsers.ParseAttestationStartedEvent(e)
     if err != nil {
-        log.WithFields("error", err).Warn("unable to parse event")
+        log.WithFields("error", err).Debug("unable to parse event")
         return nil
     }

@@ -89,7 +89,7 @@ func (cts catalogerTaskModel) View() string {
 func (m *Handler) handleCatalogerTaskStarted(e partybus.Event) ([]tea.Model, tea.Cmd) {
     mon, info, err := syftEventParsers.ParseCatalogerTaskStarted(e)
     if err != nil {
-        log.WithFields("error", err).Warn("unable to parse event")
+        log.WithFields("error", err).Debug("unable to parse event")
         return nil, nil
     }

@@ -12,7 +12,7 @@ import (
 func (m *Handler) handleFetchImage(e partybus.Event) []tea.Model {
     imgName, prog, err := stereoEventParsers.ParseFetchImage(e)
     if err != nil {
-        log.WithFields("error", err).Warn("unable to parse event")
+        log.WithFields("error", err).Debug("unable to parse event")
         return nil
     }

@@ -12,7 +12,7 @@ import (
 func (m *Handler) handleFileIndexingStarted(e partybus.Event) []tea.Model {
     path, prog, err := syftEventParsers.ParseFileIndexingStarted(e)
     if err != nil {
-        log.WithFields("error", err).Warn("unable to parse event")
+        log.WithFields("error", err).Debug("unable to parse event")
         return nil
     }

@@ -43,7 +43,7 @@ type containerdPullStatusFormatter struct {
 func (m *Handler) handlePullContainerdImage(e partybus.Event) []tea.Model {
     _, pullStatus, err := stereoscopeParsers.ParsePullContainerdImage(e)
     if err != nil {
-        log.WithFields("error", err).Warn("unable to parse event")
+        log.WithFields("error", err).Debug("unable to parse event")
         return nil
     }

@@ -44,7 +44,7 @@ type dockerPullStatusFormatter struct {
 func (m *Handler) handlePullDockerImage(e partybus.Event) []tea.Model {
     _, pullStatus, err := stereoscopeParsers.ParsePullDockerImage(e)
     if err != nil {
-        log.WithFields("error", err).Warn("unable to parse event")
+        log.WithFields("error", err).Debug("unable to parse event")
         return nil
     }

@@ -12,7 +12,7 @@ import (
 func (m *Handler) handleReadImage(e partybus.Event) []tea.Model {
     imgMetadata, prog, err := stereoEventParsers.ParseReadImage(e)
     if err != nil {
-        log.WithFields("error", err).Warn("unable to parse event")
+        log.WithFields("error", err).Debug("unable to parse event")
         return nil
     }

@@ -150,7 +150,7 @@ func (m *UI) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
         }

     case partybus.Event:
-        log.WithFields("component", "ui").Tracef("event: %q", msg.Type)
+        log.WithFields("component", "ui", "event", msg.Type).Trace("event")

         switch msg.Type {
         case event.CLIReport, event.CLINotification, event.CLIAppUpdateAvailable:
@@ -12,7 +12,7 @@ import (
 // CloseAndLogError closes the given io.Closer and reports any errors found as a warning in the log
 func CloseAndLogError(closer io.Closer, location string) {
     if err := closer.Close(); err != nil {
-        log.Warnf("unable to close file for location=%q: %+v", location, err)
+        log.Debugf("unable to close file for location=%q: %+v", location, err)
     }
 }

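For context, this helper is deferred right after a content reader is opened elsewhere in the codebase (the executable cataloger hunk further down shows defer internal.CloseAndLogError(reader, loc.RealPath)). A self-contained stand-in sketch of the pattern, with a hypothetical closeAndLogError and log/slog standing in for syft's logger:

// Stand-in sketch (assumed shape, not the syft implementation).
package main

import (
    "io"
    "log/slog"
    "strings"
)

// closeAndLogError mirrors the helper above: a close failure is not actionable
// by the caller, so it is recorded at debug rather than warn.
func closeAndLogError(closer io.Closer, location string) {
    if err := closer.Close(); err != nil {
        slog.Debug("unable to close file", "location", location, "error", err)
    }
}

func main() {
    reader := io.NopCloser(strings.NewReader("file contents"))
    defer closeAndLogError(reader, "example/location/path") // hypothetical location, demo only

    _, _ = io.ReadAll(reader)
}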
@@ -101,7 +101,7 @@ func finalizePkgCatalogerResults(cfg CatalogingFactoryConfig, resolver file.Path
         // create file-to-package relationships for files owned by the package
         owningRelationships, err := packageFileOwnershipRelationships(p, resolver)
         if err != nil {
-            log.Warnf("unable to create any package-file relationships for package name=%q type=%q: %v", p.Name, p.Type, err)
+            log.Debugf("unable to create any package-file relationships for package name=%q type=%q: %v", p.Name, p.Type, err)
         } else {
             relationships = append(relationships, owningRelationships...)
         }
@@ -174,7 +174,7 @@ func applyComplianceRules(p *pkg.Package, cfg cataloging.ComplianceConfig) (bool
         return true

     case cataloging.ComplianceActionKeep:
-        log.WithFields("pkg", p.String(), "location", loc).Tracef("package with missing %s, taking no action", fieldName)
+        log.WithFields("pkg", p.String(), "location", loc, "field", fieldName).Trace("package with missing field, taking no action")
     }
     return false
 }
@@ -23,7 +23,7 @@ func ProcessPathErrors(err error) error {
         }
     }
     unknowns, remainingErrors := ExtractCoordinateErrors(err)
-    log.Warn(remainingErrors)
+    log.Debug(remainingErrors)

     var out []error
     for _, u := range unknowns {
@@ -86,14 +86,14 @@ func (i *Cataloger) Catalog(resolver file.Resolver) (map[file.Coordinates]file.E
 func processExecutableLocation(loc file.Location, resolver file.Resolver) (*file.Executable, error) {
     reader, err := resolver.FileContentsByLocation(loc)
     if err != nil {
-        log.WithFields("error", err).Warnf("unable to get file contents for %q", loc.RealPath)
+        log.WithFields("error", err, "path", loc.RealPath).Debug("unable to get file contents")
         return nil, fmt.Errorf("unable to get file contents: %w", err)
     }
     defer internal.CloseAndLogError(reader, loc.RealPath)

     uReader, err := unionreader.GetUnionReader(reader)
     if err != nil {
-        log.WithFields("error", err).Warnf("unable to get union reader for %q", loc.RealPath)
+        log.WithFields("error", err, "path", loc.RealPath).Debug("unable to get union reader")
         return nil, fmt.Errorf("unable to get union reader: %w", err)
     }

@@ -168,17 +168,17 @@ func processExecutable(loc file.Location, reader unionreader.UnionReader) (*file
     switch format {
     case file.ELF:
         if err = findELFFeatures(&data, reader); err != nil {
-            log.WithFields("error", err).Tracef("unable to determine ELF features for %q", loc.RealPath)
+            log.WithFields("error", err, "path", loc.RealPath).Trace("unable to determine ELF features")
             err = fmt.Errorf("unable to determine ELF features: %w", err)
         }
     case file.PE:
         if err = findPEFeatures(&data, reader); err != nil {
-            log.WithFields("error", err).Tracef("unable to determine PE features for %q", loc.RealPath)
+            log.WithFields("error", err, "path", loc.RealPath).Trace("unable to determine PE features")
             err = fmt.Errorf("unable to determine PE features: %w", err)
         }
     case file.MachO:
         if err = findMachoFeatures(&data, reader); err != nil {
-            log.WithFields("error", err).Tracef("unable to determine Macho features for %q", loc.RealPath)
+            log.WithFields("error", err, "path", loc.RealPath).Trace("unable to determine Macho features")
            err = fmt.Errorf("unable to determine Macho features: %w", err)
         }
     }
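The two hunks above also show the commit's second recurring change: values that used to be interpolated into Warnf/Tracef format strings become WithFields arguments on a plain Debug or Trace call. A small before/after sketch of that refactor, again with log/slog as a stand-in for syft's logger and a made-up path used only for the demo:

package main

import (
    "errors"
    "fmt"
    "log/slog"
    "os"
)

func main() {
    logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

    err := errors.New("permission denied")
    path := "/usr/bin/example" // hypothetical path, demo only

    // before: the path is baked into the message and the event is a warning
    logger.Warn(fmt.Sprintf("unable to get file contents for %q", path), "error", err)

    // after: the path becomes a structured field and the event drops to debug
    logger.Debug("unable to get file contents", "error", err, "path", path)
}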
@@ -14,14 +14,14 @@ func AllRegularFiles(ctx context.Context, resolver file.Resolver) (locations []f
     for location := range resolver.AllLocations(ctx) {
         resolvedLocations, err := resolver.FilesByPath(location.RealPath)
         if err != nil {
-            log.Warnf("unable to resolve %+v: %+v", location, err)
+            log.Debugf("unable to resolve %+v: %+v", location, err)
             continue
         }

         for _, resolvedLocation := range resolvedLocations {
             metadata, err := resolver.FileMetadataByLocation(resolvedLocation)
             if err != nil {
-                log.Warnf("unable to get metadata for %+v: %+v", location, err)
+                log.Debugf("unable to get metadata for %+v: %+v", location, err)
                 continue
             }

@@ -24,7 +24,7 @@ func (c Coordinates) ID() artifact.ID {
     f, err := artifact.IDByHash(c)
     if err != nil {
         // TODO: what to do in this case?
-        log.Warnf("unable to get fingerprint of location coordinate=%+v: %+v", c, err)
+        log.Debugf("unable to get fingerprint of location coordinate=%+v: %+v", c, err)
         return ""
     }

@@ -302,7 +302,7 @@ func toBomDescriptorComponent(srcMetadata source.Description) *cyclonedx.Compone
         }
         bomRef, err := artifact.IDByHash(metadata.ID)
         if err != nil {
-            log.Warnf("unable to get fingerprint of source image metadata=%s: %+v", metadata.ID, err)
+            log.Debugf("unable to get fingerprint of source image metadata=%s: %+v", metadata.ID, err)
         }
         return &cyclonedx.Component{
             BOMRef: string(bomRef),
@@ -316,7 +316,7 @@ func toBomDescriptorComponent(srcMetadata source.Description) *cyclonedx.Compone
         }
         bomRef, err := artifact.IDByHash(metadata.Path)
         if err != nil {
-            log.Warnf("unable to get fingerprint of source directory metadata path=%s: %+v", metadata.Path, err)
+            log.Debugf("unable to get fingerprint of source directory metadata path=%s: %+v", metadata.Path, err)
         }
         return &cyclonedx.Component{
             BOMRef: string(bomRef),
@@ -331,7 +331,7 @@ func toBomDescriptorComponent(srcMetadata source.Description) *cyclonedx.Compone
         }
         bomRef, err := artifact.IDByHash(metadata.Path)
         if err != nil {
-            log.Warnf("unable to get fingerprint of source file metadata path=%s: %+v", metadata.Path, err)
+            log.Debugf("unable to get fingerprint of source file metadata path=%s: %+v", metadata.Path, err)
         }
         return &cyclonedx.Component{
             BOMRef: string(bomRef),
@@ -164,7 +164,7 @@ func toDependencies(s *sbom.SBOM, p pkg.Package) (out []string) {
 func dependencyName(p pkg.Package) string {
     purl, err := packageurl.FromString(p.PURL)
     if err != nil {
-        log.Warnf("Invalid PURL for package: '%s' PURL: '%s' (%w)", p.Name, p.PURL, err)
+        log.Debugf("Invalid PURL for package: '%s' PURL: '%s' (%w)", p.Name, p.PURL, err)
         return ""
     }
     // don't use qualifiers for this
@@ -136,7 +136,7 @@ func encode(out map[string]string, value reflect.Value, prefix string, fn FieldN
             encode(out, value.MapIndex(key), fmt.Sprintf("%s:%v", prefix, key.Interface()), fn)
         }
     default:
-        log.Warnf("skipping encoding of unsupported property: %s", prefix)
+        log.Debugf("skipping encoding of unsupported property: %s", prefix)
     }
 }

@@ -213,7 +213,7 @@ func decode(vals map[string]string, value reflect.Value, prefix string, fn Field
         }
         if decode(vals, v.Elem(), prefix, fn) && value.CanSet() {
             o := v.Interface()
-            log.Infof("%v", o)
+            log.Tracef("%v", o)
             value.Set(v)
         } else {
             return false
@@ -355,7 +355,7 @@ func decode(vals map[string]string, value reflect.Value, prefix string, fn Field
         }
         return values
     default:
-        log.Warnf("unable to set field: %s", prefix)
+        log.Debugf("unable to set field: %s", prefix)
         return false
     }
     return true
@@ -160,7 +160,7 @@ func toFileMetadataEntry(coordinates file.Coordinates, metadata *file.Metadata)

     mode, err = strconv.Atoi(fmt.Sprintf("%o", metadata.Mode()))
     if err != nil {
-        log.Warnf("invalid mode found in file catalog @ location=%+v mode=%q: %+v", coordinates, metadata.Mode, err)
+        log.Debugf("invalid mode found in file catalog @ location=%+v mode=%q: %+v", coordinates, metadata.Mode, err)
         mode = 0
     }

@@ -34,7 +34,7 @@ type pathSkipper struct {
 func skipPathsByMountTypeAndName(root string) PathIndexVisitor {
     infos, err := mountinfo.GetMounts(nil)
     if err != nil {
-        log.WithFields("error", err).Warnf("unable to get system mounts")
+        log.WithFields("error", err).Debug("unable to get system mounts")
         return func(_ string, _ string, _ os.FileInfo, _ error) error {
             return nil
         }
@@ -82,12 +82,12 @@ func parseApkDB(_ context.Context, resolver file.Resolver, env *generic.Environm

         field := parseApkField(line)
         if field == nil {
-            log.Warnf("unable to parse field data from line %q", line)
+            log.Debugf("unable to parse field data from line %q", line)
             errs = unknown.Appendf(errs, reader, "unable to parse field data from line %q", line)
             continue
         }
         if len(field.name) == 0 {
-            log.Warnf("failed to parse field name from line %q", line)
+            log.Debugf("failed to parse field name from line %q", line)
             errs = unknown.Appendf(errs, reader, "failed to parse field name from line %q", line)
             continue
         }
@@ -234,7 +234,7 @@ func (f apkField) apply(p *parsedData, ctx *apkFileParsingContext) {
     case "S":
         i, err := strconv.Atoi(f.value)
         if err != nil {
-            log.Warnf("unable to parse value %q for field %q: %w", f.value, f.name, err)
+            log.Debugf("unable to parse value %q for field %q: %w", f.value, f.name, err)
             return
         }

@@ -242,7 +242,7 @@ func (f apkField) apply(p *parsedData, ctx *apkFileParsingContext) {
     case "I":
         i, err := strconv.Atoi(f.value)
         if err != nil {
-            log.Warnf("unable to parse value %q for field %q: %w", f.value, f.name, err)
+            log.Debugf("unable to parse value %q for field %q: %w", f.value, f.name, err)
             return
         }

@@ -272,7 +272,7 @@ func (f apkField) apply(p *parsedData, ctx *apkFileParsingContext) {
         var ok bool
         latest.OwnerUID, latest.OwnerGID, latest.Permissions, ok = processFileInfo(f.value)
         if !ok {
-            log.Warnf("unexpected value for APK ACL field %q: %q", f.name, f.value)
+            log.Debugf("unexpected value for APK ACL field %q: %q", f.name, f.value)
             return
         }

@@ -298,7 +298,7 @@ func (f apkField) apply(p *parsedData, ctx *apkFileParsingContext) {
         var ok bool
         latest.OwnerUID, latest.OwnerGID, latest.Permissions, ok = processFileInfo(f.value)
         if !ok {
-            log.Warnf("unexpected value for APK ACL field %q: %q", f.name, f.value)
+            log.Debugf("unexpected value for APK ACL field %q: %q", f.name, f.value)
             return
         }

@@ -72,12 +72,12 @@ func parseRebarLock(_ context.Context, _ file.Resolver, _ *generic.Environment,

         sourcePkg := pkgMap[name]
         if sourcePkg == nil {
-            log.WithFields("package", name).Warn("unable find source package")
+            log.WithFields("package", name).Debug("unable find source package")
             continue
         }
         metadata, ok := sourcePkg.Metadata.(pkg.ErlangRebarLockEntry)
         if !ok {
-            log.WithFields("package", name).Warn("unable to extract rebar.lock metadata to add hash metadata")
+            log.WithFields("package", name).Debug("unable to extract rebar.lock metadata to add hash metadata")
             continue
         }

@@ -63,7 +63,7 @@ func (c *Cataloger) WithParserByGlobs(parser Parser, globs ...string) *Cataloger

         matches, err := resolver.FilesByGlob(g)
         if err != nil {
-            log.Warnf("unable to process glob=%q: %+v", g, err)
+            log.Debugf("unable to process glob=%q: %+v", g, err)
             continue
         }
         requests = append(requests, makeRequests(parser, matches)...)
@@ -81,7 +81,7 @@ func (c *Cataloger) WithParserByMimeTypes(parser Parser, types ...string) *Catal
         log.WithFields("mimetypes", types).Trace("searching for paths matching mimetype")
         matches, err := resolver.FilesByMIMEType(types...)
         if err != nil {
-            log.Warnf("unable to process mimetypes=%+v: %+v", types, err)
+            log.Debugf("unable to process mimetypes=%+v: %+v", types, err)
             return nil
         }
         requests = append(requests, makeRequests(parser, matches)...)
@@ -100,7 +100,7 @@ func (c *Cataloger) WithParserByPath(parser Parser, paths ...string) *Cataloger

         matches, err := resolver.FilesByPath(p)
         if err != nil {
-            log.Warnf("unable to process path=%q: %+v", p, err)
+            log.Debugf("unable to process path=%q: %+v", p, err)
             continue
         }
         requests = append(requests, makeRequests(parser, matches)...)
@@ -192,7 +192,7 @@ func (c *Cataloger) process(ctx context.Context, resolver file.Resolver, pkgs []
 func invokeParser(ctx context.Context, resolver file.Resolver, location file.Location, logger logger.Logger, parser Parser, env *Environment) ([]pkg.Package, []artifact.Relationship, error) {
     contentReader, err := resolver.FileContentsByLocation(location)
     if err != nil {
-        logger.WithFields("location", location.RealPath, "error", err).Warn("unable to fetch contents")
+        logger.WithFields("location", location.RealPath, "error", err).Debug("unable to fetch contents")
         return nil, nil, err
     }
     defer internal.CloseAndLogError(contentReader, location.AccessPath)
@@ -34,7 +34,7 @@ func parsePortageContents(_ context.Context, resolver file.Resolver, _ *generic.

     name, version := cpvMatch[1], cpvMatch[2]
     if name == "" || version == "" {
-        log.WithFields("path", reader.Location.RealPath).Warnf("failed to parse portage name and version")
+        log.WithFields("path", reader.Location.RealPath).Debug("failed to parse portage name and version")
         return nil, nil, fmt.Errorf("failed to parse portage name and version")
     }

@@ -63,7 +63,7 @@ func parsePortageContents(_ context.Context, resolver file.Resolver, _ *generic.
 func addFiles(resolver file.Resolver, dbLocation file.Location, p *pkg.Package) {
     contentsReader, err := resolver.FileContentsByLocation(dbLocation)
     if err != nil {
-        log.WithFields("path", dbLocation.RealPath).Warnf("failed to fetch portage contents (package=%s): %+v", p.Name, err)
+        log.WithFields("path", dbLocation.RealPath, "package", p.Name, "error", err).Debug("failed to fetch portage contents")
         return
     }
     defer internal.CloseAndLogError(contentsReader, dbLocation.RealPath)
@@ -105,7 +105,7 @@ func addLicenses(resolver file.Resolver, dbLocation file.Location, p *pkg.Packag

     licenseReader, err := resolver.FileContentsByLocation(*location)
     if err != nil {
-        log.WithFields("path", dbLocation.RealPath).Warnf("failed to fetch portage LICENSE: %+v", err)
+        log.WithFields("path", dbLocation.RealPath, "error", err).Debug("failed to fetch portage LICENSE")
         return
     }
     defer internal.CloseAndLogError(licenseReader, location.RealPath)
@@ -141,7 +141,7 @@ func addSize(resolver file.Resolver, dbLocation file.Location, p *pkg.Package) {

     sizeReader, err := resolver.FileContentsByLocation(*location)
     if err != nil {
-        log.WithFields("name", p.Name).Warnf("failed to fetch portage SIZE: %+v", err)
+        log.WithFields("name", p.Name, "error", err).Debug("failed to fetch portage SIZE")
         return
     }
     defer internal.CloseAndLogError(sizeReader, location.RealPath)
@@ -80,7 +80,7 @@ func defaultGoModDir() string {
     if goPath == "" {
         homeDir, err := homedir.Dir()
         if err != nil {
-            log.Warnf("unable to determine GOPATH or user home dir: %w", err)
+            log.Debugf("unable to determine GOPATH or user home dir: %w", err)
             return ""
         }
         goPath = filepath.Join(homeDir, "go")
@@ -60,7 +60,7 @@ func newGoLicenseResolver(catalogerName string, opts CatalogerConfig) goLicenseR
     if vendorDir == "" {
         wd, err := os.Getwd()
         if err != nil {
-            log.Warn("unable to get CWD while resolving the local go vendor dir: %v", err)
+            log.Debug("unable to get CWD while resolving the local go vendor dir: %v", err)
         } else {
             vendorDir = filepath.Join(wd, "vendor")
         }
@@ -26,7 +26,7 @@ func scanFile(location file.Location, reader unionreader.UnionReader) ([]*extend
     // with more than one binary
     readers, errs := unionreader.GetReaders(reader)
     if errs != nil {
-        log.WithFields("error", errs).Warnf("failed to open a golang binary")
+        log.WithFields("error", errs).Debug("failed to open a golang binary")
         return nil, fmt.Errorf("failed to open a golang binary: %w", errs)
     }

@@ -50,7 +50,7 @@ func parseStackLock(_ context.Context, _ file.Resolver, _ *generic.Environment,
     var lockFile stackLock

     if err := yaml.Unmarshal(bytes, &lockFile); err != nil {
-        log.WithFields("error", err).Tracef("failed to parse stack.yaml.lock file %q", reader.RealPath)
+        log.WithFields("error", err, "path", reader.RealPath).Trace("failed to parse stack.yaml.lock")
         return nil, nil, fmt.Errorf("failed to parse stack.yaml.lock file")
     }

@@ -31,7 +31,7 @@ func parseStackYaml(_ context.Context, _ file.Resolver, _ *generic.Environment,
     var stackFile stackYaml

     if err := yaml.Unmarshal(bytes, &stackFile); err != nil {
-        log.WithFields("error", err).Tracef("failed to parse stack.yaml file %q", reader.RealPath)
+        log.WithFields("error", err, "path", reader.RealPath).Trace("failed to parse stack.yaml")
         return nil, nil, fmt.Errorf("failed to parse stack.yaml file")
     }

@@ -210,7 +210,7 @@ func finalizePackage(p *pkg.Package) {
             p.Type = pkg.JenkinsPluginPkg
         }
     } else {
-        log.WithFields("package", p.String()).Warn("unable to extract java metadata to generate purl")
+        log.WithFields("package", p.String()).Debug("unable to extract java metadata to generate purl")
     }

     p.SetID()
@@ -237,7 +237,7 @@ func (j *archiveParser) discoverMainPackage(ctx context.Context) (*pkg.Package,
     manifestContents := contents[manifestMatches[0]]
     manifest, err := parseJavaManifest(j.archivePath, strings.NewReader(manifestContents))
     if err != nil {
-        log.Warnf("failed to parse java manifest (%s): %+v", j.location, err)
+        log.Debugf("failed to parse java manifest (%s): %+v", j.location, err)
         return nil, nil
     }

@@ -485,7 +485,7 @@ func getDigestsFromArchive(archivePath string) ([]file.Digest, error) {
     // grab and assign digest for the entire archive
     digests, err := intFile.NewDigestsFromFile(archiveCloser, javaArchiveHashes)
     if err != nil {
-        log.Warnf("failed to create digest for file=%q: %+v", archivePath, err)
+        log.Debugf("failed to create digest for file=%q: %+v", archivePath, err)
     }

     return digests, nil
@@ -549,7 +549,7 @@ func discoverPkgsFromOpeners(ctx context.Context, location file.Location, opener
     for pathWithinArchive, archiveOpener := range openers {
         nestedPkgs, nestedRelationships, err := discoverPkgsFromOpener(ctx, location, pathWithinArchive, archiveOpener, cfg, parentPkg)
         if err != nil {
-            log.WithFields("location", location.Path()).Warnf("unable to discover java packages from opener: %+v", err)
+            log.WithFields("location", location.Path(), "error", err).Debug("unable to discover java packages from opener")
             continue
         }

@@ -578,7 +578,7 @@ func discoverPkgsFromOpener(ctx context.Context, location file.Location, pathWit
     }
     defer func() {
         if closeErr := archiveReadCloser.Close(); closeErr != nil {
-            log.Warnf("unable to close archived file from tempdir: %+v", closeErr)
+            log.Debugf("unable to close archived file from tempdir: %+v", closeErr)
         }
     }()

@@ -607,7 +607,7 @@ func pomPropertiesByParentPath(archivePath string, location file.Location, extra
     for filePath, fileContents := range contentsOfMavenPropertiesFiles {
         pomProperties, err := parsePomProperties(filePath, strings.NewReader(fileContents))
         if err != nil {
-            log.WithFields("contents-path", filePath, "location", location.Path()).Warnf("failed to parse pom.properties: %+v", err)
+            log.WithFields("contents-path", filePath, "location", location.Path(), "error", err).Debug("failed to parse pom.properties")
             continue
         }

@@ -637,7 +637,7 @@ func pomProjectByParentPath(archivePath string, location file.Location, extractP
         // TODO: when we support locations of paths within archives we should start passing the specific pom.xml location object instead of the top jar
         pom, err := maven.ParsePomXML(strings.NewReader(fileContents))
         if err != nil {
-            log.WithFields("contents-path", filePath, "location", location.Path()).Warnf("failed to parse pom.xml: %+v", err)
+            log.WithFields("contents-path", filePath, "location", location.Path(), "error", err).Debug("failed to parse pom.xml")
             continue
         }
         if pom == nil {
@@ -20,7 +20,7 @@ import (
 func newPackageJSONPackage(u packageJSON, indexLocation file.Location) pkg.Package {
     licenseCandidates, err := u.licensesFromJSON()
     if err != nil {
-        log.Warnf("unable to extract licenses from javascript package.json: %+v", err)
+        log.Debugf("unable to extract licenses from javascript package.json: %+v", err)
     }

     license := pkg.NewLicensesFromLocation(indexLocation, licenseCandidates...)
@@ -73,7 +73,7 @@ func newPackageLockV1Package(cfg CatalogerConfig, resolver file.Resolver, locati
             licenseSet = pkg.NewLicenseSet(licenses...)
         }
         if err != nil {
-            log.Warnf("unable to extract licenses from javascript yarn.lock for package %s:%s: %+v", name, version, err)
+            log.Debugf("unable to extract licenses from javascript yarn.lock for package %s:%s: %+v", name, version, err)
         }
     }

@@ -105,7 +105,7 @@ func newPackageLockV2Package(cfg CatalogerConfig, resolver file.Resolver, locati
             licenseSet = pkg.NewLicenseSet(licenses...)
         }
         if err != nil {
-            log.Warnf("unable to extract licenses from javascript yarn.lock for package %s:%s: %+v", name, u.Version, err)
+            log.Debugf("unable to extract licenses from javascript yarn.lock for package %s:%s: %+v", name, u.Version, err)
         }
     }

@@ -150,7 +150,7 @@ func newYarnLockPackage(cfg CatalogerConfig, resolver file.Resolver, location fi
             licenseSet = pkg.NewLicenseSet(licenses...)
         }
         if err != nil {
-            log.Warnf("unable to extract licenses from javascript yarn.lock for package %s:%s: %+v", name, version, err)
+            log.Debugf("unable to extract licenses from javascript yarn.lock for package %s:%s: %+v", name, version, err)
         }
     }
     return finalizeLockPkg(
@@ -73,7 +73,7 @@ func parseLinuxKernelMetadata(magicType []string) (p pkg.LinuxKernel) {
             swapDevStr := strings.TrimPrefix(t, "swap_dev ")
             swapDev, err := strconv.ParseInt(swapDevStr, 16, 32)
             if err != nil {
-                log.Warnf("unable to parse swap device: %s", err)
+                log.Debugf("unable to parse swap device: %s", err)
                 continue
             }
             p.SwapDevice = int(swapDev)
@@ -81,7 +81,7 @@ func parseLinuxKernelMetadata(magicType []string) (p pkg.LinuxKernel) {
             rootDevStr := strings.TrimPrefix(t, "root_dev ")
             rootDev, err := strconv.ParseInt(rootDevStr, 16, 32)
             if err != nil {
-                log.Warnf("unable to parse root device: %s", err)
+                log.Debugf("unable to parse root device: %s", err)
                 continue
             }
             p.SwapDevice = int(rootDev)
@@ -68,13 +68,13 @@ func (c *storeCataloger) Catalog(ctx context.Context, resolver file.Resolver) ([
         p := &pkgs[i]
         locations := p.Locations.ToSlice()
         if len(locations) == 0 {
-            log.WithFields("package", p.Name).Warn("nix package has no evidence locations associated")
+            log.WithFields("package", p.Name).Debug("nix package has no evidence locations associated")
             continue
         }
         parentStorePath := locations[0].RealPath
         files, ok := filesByPath[parentStorePath]
         if !ok {
-            log.WithFields("path", parentStorePath, "nix-store-path", parentStorePath).Warn("found a nix store file for a non-existent package")
+            log.WithFields("path", parentStorePath, "nix-store-path", parentStorePath).Debug("found a nix store file for a non-existent package")
             continue
         }
         appendFiles(p, files.ToSlice()...)
@@ -86,7 +86,7 @@ func (c *storeCataloger) Catalog(ctx context.Context, resolver file.Resolver) ([
 func appendFiles(p *pkg.Package, location ...file.Location) {
     metadata, ok := p.Metadata.(pkg.NixStoreEntry)
     if !ok {
-        log.WithFields("package", p.Name).Warn("nix package metadata missing")
+        log.WithFields("package", p.Name).Debug("nix package metadata missing")
         return
     }

@@ -104,20 +104,20 @@ func findLicenses(ctx context.Context, scanner licenses.Scanner, resolver file.R
         // If we have a license file then resolve and parse it
         found, err := resolver.FilesByPath(m.LicenseLocation.Path())
         if err != nil {
-            log.WithFields("error", err).Tracef("unable to resolve python license path %s", m.LicenseLocation.Path())
+            log.WithFields("error", err, "path", m.LicenseLocation.Path()).Trace("unable to resolve python license")
         }
         if len(found) > 0 {
             metadataContents, err := resolver.FileContentsByLocation(found[0])
             if err == nil {
                 parsed, err := scanner.PkgSearch(ctx, file.NewLocationReadCloser(m.LicenseLocation, metadataContents))
                 if err != nil {
-                    log.WithFields("error", err).Tracef("unable to parse a license from the file in %s", m.LicenseLocation.Path())
+                    log.WithFields("error", err, "path", m.LicenseLocation.Path()).Trace("unable to parse a license from the file")
                 }
                 if len(parsed) > 0 {
                     licenseSet = pkg.NewLicenseSet(parsed...)
                 }
             } else {
-                log.WithFields("error", err).Tracef("unable to read file contents at %s", m.LicenseLocation.Path())
+                log.WithFields("error", err, "path", m.LicenseLocation.Path()).Trace("unable to read file contents")
             }
         }
     }
@@ -127,7 +127,7 @@ func (rp requirementsParser) parseRequirementsTxt(_ context.Context, _ file.Reso

         req := newRequirement(line)
         if req == nil {
-            log.WithFields("path", reader.RealPath).Warnf("unable to parse requirements.txt line: %q", line)
+            log.WithFields("path", reader.RealPath, "line", line).Debug("unable to parse requirements.txt line")
             errs = unknown.Appendf(errs, reader, "unable to parse requirements.txt line: %q", line)
             continue
         }
@@ -136,7 +136,7 @@ func (rp requirementsParser) parseRequirementsTxt(_ context.Context, _ file.Reso
         version := parseVersion(req.VersionConstraint, rp.guessUnpinnedRequirements)

         if version == "" {
-            log.WithFields("path", reader.RealPath).Tracef("unable to determine package version in requirements.txt line: %q", line)
+            log.WithFields("path", reader.RealPath, "line", line).Trace("unable to determine package version in requirements.txt line")
             errs = unknown.Appendf(errs, reader, "unable to determine package version in requirements.txt line: %q", line)
             continue
         }
@@ -95,7 +95,7 @@ func extractRFC5322Fields(locationReader file.LocationReadCloser) (map[string]an

             fields[key] = handleSingleOrMultiField(fields[key], val)
         } else {
-            log.Warnf("cannot parse field from path: %q from line: %q", locationReader.Path(), line)
+            log.Debugf("cannot parse field from path: %q from line: %q", locationReader.Path(), line)
         }
     }
 }
@@ -25,12 +25,12 @@ func parseWheelOrEggRecord(reader io.Reader) []pkg.PythonFileRecord {
             break
         }
         if err != nil {
-            log.Warnf("unable to read python record file: %w", err)
+            log.Debugf("unable to read python record file: %w", err)
             continue
         }

         if len(recordList) != 3 {
-            log.Warnf("python record an unexpected length=%d: %q", len(recordList), recordList)
+            log.Debugf("python record an unexpected length=%d: %q", len(recordList), recordList)
             continue
         }

@@ -46,7 +46,7 @@ func parseWheelOrEggRecord(reader io.Reader) []pkg.PythonFileRecord {
         }
         fields := strings.SplitN(item, "=", 2)
         if len(fields) != 2 {
-            log.Warnf("unexpected python record digest: %q", item)
+            log.Debugf("unexpected python record digest: %q", item)
             continue
         }

@@ -17,7 +17,7 @@ import (
 func NewDBCataloger() pkg.Cataloger {
     // check if a sqlite driver is available
     if !isSqliteDriverAvailable() {
-        log.Warnf("sqlite driver is not available, newer RPM databases might not be cataloged")
+        log.Debugf("sqlite driver is not available, newer RPM databases might not be cataloged")
     }

     return generic.NewCataloger("rpm-db-cataloger").
@@ -127,7 +127,7 @@ func extractRpmFileRecords(resolver file.PathResolver, entry rpmdb.PackageInfo)

     files, err := entry.InstalledFiles()
     if err != nil {
-        log.Warnf("unable to parse listing of installed files for RPM DB entry: %s", err.Error())
+        log.Debugf("unable to parse listing of installed files for RPM DB entry: %s", err.Error())
         return records, fmt.Errorf("unable to parse listing of installed files for RPM DB entry: %w", err)
     }

@@ -35,12 +35,12 @@ func parseRpmManifest(_ context.Context, _ file.Resolver, _ *generic.Environment

         metadata, err := newMetadataFromManifestLine(strings.TrimSuffix(line, "\n"))
         if err != nil {
-            log.Warnf("unable to parse RPM manifest entry: %+v", err)
+            log.Debugf("unable to parse RPM manifest entry: %+v", err)
             continue
         }

         if metadata == nil {
-            log.Warn("unable to parse RPM manifest entry: no metadata found")
+            log.Debug("unable to parse RPM manifest entry: no metadata found")
             continue
         }

@@ -42,7 +42,7 @@ func parseAuditBinaryEntry(reader unionreader.UnionReader, filename string) ([]r
     // with more than one binary
     readers, err := unionreader.GetReaders(reader)
     if err != nil {
-        log.Warnf("rust cataloger: failed to open a binary: %v", err)
+        log.Debugf("rust cataloger: failed to open a binary: %v", err)
         return nil, fmt.Errorf("rust cataloger: failed to open a binary: %w", err)
     }

@@ -54,7 +54,7 @@ func (c *Collection) Package(id artifact.ID) *Package {
     }
     var p Package
     if err := copier.Copy(&p, &v); err != nil {
-        log.Warnf("unable to copy package id=%q name=%q: %+v", id, v.Name, err)
+        log.Debugf("unable to copy package id=%q name=%q: %+v", id, v.Name, err)
         return nil
     }
     p.id = v.id
@@ -111,7 +111,7 @@ func (c *Collection) add(p Package) {

     id := p.ID()
     if id == "" {
-        log.Warnf("found package with empty ID while adding to the collection: %+v", p)
+        log.Debugf("found package with empty ID while adding to the collection: %+v", p)
         p.SetID()
         id = p.ID()
     }
@@ -119,7 +119,7 @@ func (c *Collection) add(p Package) {
     if existing, exists := c.byID[id]; exists {
         // there is already a package with this fingerprint merge the existing record with the new one
         if err := existing.merge(p); err != nil {
-            log.Warnf("failed to merge packages: %+v", err)
+            log.Debugf("failed to merge packages: %+v", err)
         } else {
             c.byID[id] = existing
             c.addPathsToIndex(p)
@@ -38,7 +38,7 @@ func (p *Package) SetID() {
     id, err := artifact.IDByHash(p)
     if err != nil {
         // TODO: what to do in this case?
-        log.Warnf("unable to get fingerprint of package=%s@%s: %+v", p.Name, p.Version, err)
+        log.Debugf("unable to get fingerprint of package=%s@%s: %+v", p.Name, p.Version, err)
         return
     }
     p.id = id
@@ -59,7 +59,7 @@ func (p *Package) merge(other Package) error {
     }

     if p.PURL != other.PURL {
-        log.Warnf("merging packages have with different pURLs: %q=%q vs %q=%q", p.id, p.PURL, other.id, other.PURL)
+        log.Debugf("merging packages have with different pURLs: %q=%q vs %q=%q", p.id, p.PURL, other.id, other.PURL)
     }

     p.Locations.Add(other.Locations.ToSlice()...)