mirror of https://github.com/anchore/syft.git

commit 821210006d (parent 053768c6c6)

make updates due to linter update

Signed-off-by: Alex Goodman <alex.goodman@anchore.com>
@@ -4,14 +4,13 @@ import (
     "fmt"
     "os"
 
-    "github.com/spf13/cobra"
-
     "github.com/anchore/stereoscope"
     "github.com/anchore/syft/internal/config"
     "github.com/anchore/syft/internal/log"
     "github.com/anchore/syft/internal/logger"
     "github.com/anchore/syft/syft"
     "github.com/gookit/color"
+    "github.com/spf13/cobra"
     "github.com/spf13/viper"
     "github.com/wagoodman/go-partybus"
 )
@@ -67,6 +66,7 @@ func initCmdAliasBindings() {
     }
 }
 
+// nolint:forbidigo
 func initAppConfig() {
     cfg, err := config.LoadApplicationConfig(viper.GetViper(), persistentOpts)
     if err != nil {

@@ -8,7 +8,6 @@ import (
     "github.com/docker/docker/api/types"
     "github.com/docker/docker/api/types/filters"
     "github.com/docker/docker/client"
-
     "github.com/spf13/cobra"
 )
 
@@ -87,7 +86,7 @@ func dockerImageValidArgsFunction(cmd *cobra.Command, args []string, toComplete
 }
 
 func listLocalDockerImages(prefix string) ([]string, error) {
-    var repoTags = make([]string, 0)
+    repoTags := make([]string, 0)
     ctx := context.Background()
     cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
     if err != nil {

@@ -102,7 +102,7 @@ func init() {
 }
 
 func setPackageFlags(flags *pflag.FlagSet) {
-    ///////// Formatting & Input options //////////////////////////////////////////////
+    // Formatting & Input options //////////////////////////////////////////////
 
     flags.StringP(
         "scope", "s", source.SquashedScope.String(),
@@ -118,7 +118,7 @@ func setPackageFlags(flags *pflag.FlagSet) {
         "file to write the report output to (default is STDOUT)",
     )
 
-    ///////// Upload options //////////////////////////////////////////////////////////
+    // Upload options //////////////////////////////////////////////////////
     flags.StringP(
         "host", "H", "",
         "the hostname or URL of the Anchore Enterprise instance to upload to",
@@ -151,7 +151,7 @@ func setPackageFlags(flags *pflag.FlagSet) {
 }
 
 func bindPackagesConfigOptions(flags *pflag.FlagSet) error {
-    ///////// Formatting & Input options //////////////////////////////////////////////
+    // Formatting & Input options //////////////////////////////////////////////
 
     if err := viper.BindPFlag("package.cataloger.scope", flags.Lookup("scope")); err != nil {
         return err
@@ -165,7 +165,7 @@ func bindPackagesConfigOptions(flags *pflag.FlagSet) error {
         return err
     }
 
-    ///////// Upload options //////////////////////////////////////////////////////////
+    // Upload options //////////////////////////////////////////////////////
 
     if err := viper.BindPFlag("anchore.host", flags.Lookup("host")); err != nil {
         return err
@@ -303,7 +303,7 @@ func runPackageSbomUpload(src *source.Source, s source.Metadata, catalog *pkg.Ca
     }
 
     if err := c.Import(context.Background(), importCfg); err != nil {
-        return fmt.Errorf("failed to upload results to host=%s: %+v", appConfig.Anchore.Host, err)
+        return fmt.Errorf("failed to upload results to host=%s: %w", appConfig.Anchore.Host, err)
     }
     return nil
 }
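
Note: the `%+v` → `%w` change above is the error-wrapping fix that recurs throughout this commit: `%w` keeps the original error in the chain so callers can still match it. A minimal sketch of why that matters (hypothetical names, not syft code):

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
        "os"
    )

    func loadConfig(path string) error {
        if _, err := os.Open(path); err != nil {
            // %w wraps err; %v or %+v would flatten it to a plain string.
            return fmt.Errorf("unable to load config %q: %w", path, err)
        }
        return nil
    }

    func main() {
        err := loadConfig("/does/not/exist")
        // errors.Is sees through the wrapper to the underlying cause.
        fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
    }
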
@@ -17,7 +17,7 @@ func reportWriter() (io.Writer, func() error, error) {
     case 0:
         return os.Stdout, nop, nil
     default:
-        reportFile, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+        reportFile, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
         if err != nil {
             return nil, nop, fmt.Errorf("unable to create report file: %w", err)
         }

@@ -24,6 +24,7 @@ var rootCmd = &cobra.Command{
     ValidArgsFunction: packagesCmd.ValidArgsFunction,
 }
 
+// nolint:forbidigo
 func init() {
     // set universal flags
     rootCmd.PersistentFlags().StringVarP(&persistentOpts.ConfigPath, "config", "c", "", "application config file")

@@ -6,7 +6,6 @@ import (
     "os"
 
     "github.com/anchore/syft/internal"
-
     "github.com/anchore/syft/internal/version"
     "github.com/spf13/cobra"
 )
@@ -24,6 +23,7 @@ func init() {
     rootCmd.AddCommand(versionCmd)
 }
 
+// nolint:forbidigo
 func printVersion(_ *cobra.Command, _ []string) {
     versionInfo := version.FromBuild()
 
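
Note: the added `// nolint:forbidigo` directives suppress the forbidigo linter, which by default flags direct `fmt.Print*` calls; CLI entry points whose whole job is printing to stdout opt out at the function level. A small illustration (hypothetical code, not from syft):

    package main

    import "fmt"

    // nolint:forbidigo
    func printBanner() {
        // forbidigo would normally flag this fmt.Println; the directive above
        // permits it here, where writing to stdout is the point.
        fmt.Println("banner")
    }

    func main() {
        printBanner()
    }
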
@@ -6,8 +6,6 @@ import (
     "fmt"
     "time"
 
-    "github.com/antihax/optional"
-
     "github.com/anchore/client-go/pkg/external"
     "github.com/anchore/stereoscope/pkg/image"
     "github.com/anchore/syft/internal/bus"
@@ -15,6 +13,7 @@ import (
     "github.com/anchore/syft/syft/event"
     "github.com/anchore/syft/syft/pkg"
     "github.com/anchore/syft/syft/source"
+    "github.com/antihax/optional"
     "github.com/wagoodman/go-partybus"
     "github.com/wagoodman/go-progress"
 )
@@ -64,7 +63,7 @@ func (c *Client) Import(ctx context.Context, cfg ImportConfig) error {
     stage.Current = "starting session"
     startOperation, _, err := c.client.ImportsApi.CreateOperation(authedCtx)
     if err != nil {
-        var detail = "no details given"
+        detail := "no details given"
         var openAPIErr external.GenericOpenAPIError
         if errors.As(err, &openAPIErr) {
             detail = string(openAPIErr.Body())
@@ -106,7 +105,7 @@ func (c *Client) Import(ctx context.Context, cfg ImportConfig) error {
 
     _, _, err = c.client.ImagesApi.AddImage(authedCtx, imageModel, &opts)
     if err != nil {
-        var detail = "no details given"
+        detail := "no details given"
         var openAPIErr external.GenericOpenAPIError
         if errors.As(err, &openAPIErr) {
             detail = string(openAPIErr.Body())
@@ -122,7 +121,7 @@ func (c *Client) Import(ctx context.Context, cfg ImportConfig) error {
 }
 
 func addImageModel(imageMetadata image.Metadata, packageDigest, manifestDigest, dockerfileDigest, configDigest, sessionID string) external.ImageAnalysisRequest {
-    var tags = make([]string, len(imageMetadata.Tags))
+    tags := make([]string, len(imageMetadata.Tags))
     for i, t := range imageMetadata.Tags {
         tags[i] = t.String()
     }

@@ -8,10 +8,9 @@ import (
     "fmt"
     "net/http"
 
-    "github.com/wagoodman/go-progress"
-
     "github.com/anchore/client-go/pkg/external"
     "github.com/anchore/syft/internal/log"
+    "github.com/wagoodman/go-progress"
 )
 
 type configImportAPI interface {

@@ -6,11 +6,9 @@ import (
     "fmt"
     "net/http"
 
-    "github.com/wagoodman/go-progress"
-
-    "github.com/anchore/syft/internal/log"
-
     "github.com/anchore/client-go/pkg/external"
+    "github.com/anchore/syft/internal/log"
+    "github.com/wagoodman/go-progress"
 )
 
 type dockerfileImportAPI interface {

@@ -8,10 +8,9 @@ import (
     "fmt"
     "net/http"
 
-    "github.com/wagoodman/go-progress"
-
     "github.com/anchore/client-go/pkg/external"
     "github.com/anchore/syft/internal/log"
+    "github.com/wagoodman/go-progress"
 )
 
 type manifestImportAPI interface {

@@ -8,16 +8,13 @@ import (
     "fmt"
     "net/http"
 
-    "github.com/anchore/syft/internal/presenter/packages"
-
-    "github.com/wagoodman/go-progress"
-
-    "github.com/anchore/syft/syft/distro"
-    "github.com/anchore/syft/syft/source"
-
     "github.com/anchore/client-go/pkg/external"
     "github.com/anchore/syft/internal/log"
+    "github.com/anchore/syft/internal/presenter/packages"
+    "github.com/anchore/syft/syft/distro"
     "github.com/anchore/syft/syft/pkg"
+    "github.com/anchore/syft/syft/source"
+    "github.com/wagoodman/go-progress"
 )
 
 type packageSBOMImportAPI interface {

@@ -15,8 +15,10 @@ package bus
 
 import "github.com/wagoodman/go-partybus"
 
-var publisher partybus.Publisher
-var active bool
+var (
+    publisher partybus.Publisher
+    active    bool
+)
 
 // SetPublisher sets the singleton event bus publisher. This is optional; if no bus is provided, the library will
 // behave no differently than if a bus had been provided.

@@ -144,7 +144,6 @@ func (cfg *Application) parseConfigValues() error {
 func (cfg Application) String() string {
     // yaml is pretty human friendly (at least when compared to json)
     appCfgStr, err := yaml.Marshal(&cfg)
-
     if err != nil {
         return err.Error()
     }

@@ -4,7 +4,6 @@ import (
     "os"
 
     "github.com/anchore/stereoscope/pkg/image"
-
     "github.com/spf13/viper"
 )
 
@@ -58,7 +57,7 @@ func hasNonEmptyCredentials(username, password, token string) bool {
 }
 
 func (cfg *registry) ToOptions() *image.RegistryOptions {
-    var auth = make([]image.RegistryCredentials, len(cfg.Auth))
+    auth := make([]image.RegistryCredentials, len(cfg.Auth))
     for i, a := range cfg.Auth {
         auth[i] = image.RegistryCredentials{
             Authority: a.Authority,

@@ -1,6 +1,7 @@
 package internal
 
 import (
+    "errors"
     "fmt"
     "io"
     "os"
@@ -15,24 +16,23 @@ func CloseAndLogError(closer io.Closer, location string) {
     }
 }
 
-type ErrPath struct {
+type PathError struct {
     Path string
     Err  error
 }
 
-func (e ErrPath) Error() string {
+func (e PathError) Error() string {
     return fmt.Sprintf("unable to observe contents of %+v: %v", e.Path, e.Err)
 }
 
-func IsErrPath(err error) bool {
-    _, ok := err.(ErrPath)
-    return ok
+func IsPathError(err error) bool {
+    return errors.As(err, &PathError{})
 }
 
 func IsErrPathPermission(err error) bool {
-    pathErr, ok := err.(ErrPath)
-    if ok {
+    var pathErr *PathError
+    if errors.As(err, pathErr) {
         return os.IsPermission(pathErr.Err)
     }
-    return ok
+    return false
 }
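
Note: renaming `ErrPath` to `PathError` follows the Go convention that sentinel *values* are named `ErrFoo` while error *types* are named `FooError`, and the rewritten helpers lean on `errors.As`, which walks the wrapped-error chain rather than a bare type assertion that only sees the outermost error. The same pattern in isolation (hypothetical type, not syft's):

    package main

    import (
        "errors"
        "fmt"
    )

    type pathError struct {
        path string
        err  error
    }

    func (e *pathError) Error() string {
        return fmt.Sprintf("unable to read %s: %v", e.path, e.err)
    }

    func main() {
        // Wrap the typed error once; a direct assertion on err would now fail,
        // but errors.As still finds the *pathError inside the chain.
        err := fmt.Errorf("cataloging failed: %w",
            &pathError{path: "/etc/shadow", err: errors.New("permission denied")})

        var pe *pathError
        if errors.As(err, &pe) {
            fmt.Println("failed path:", pe.path) // failed path: /etc/shadow
        }
    }
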
@@ -24,12 +24,12 @@ const (
 
 const perFileReadLimit = 2 * GB
 
-type errZipSlipDetected struct {
+type zipSlipDetectedError struct {
     Prefix   string
     JoinArgs []string
 }
 
-func (e *errZipSlipDetected) Error() string {
+func (e *zipSlipDetectedError) Error() string {
     return fmt.Sprintf("paths are not allowed to resolve outside of the root prefix (%q). Destination: %q", e.Prefix, e.JoinArgs)
 }
 
@@ -197,7 +197,7 @@ func safeJoin(prefix string, dest ...string) (string, error) {
     joinResult := filepath.Join(append([]string{prefix}, dest...)...)
     cleanJoinResult := filepath.Clean(joinResult)
     if !strings.HasPrefix(cleanJoinResult, filepath.Clean(prefix)) {
-        return "", &errZipSlipDetected{
+        return "", &zipSlipDetectedError{
             Prefix:   prefix,
             JoinArgs: dest,
         }

@@ -299,7 +299,7 @@ func TestSafeJoin(t *testing.T) {
                 "../../../etc/passwd",
             },
             expected:     "",
-            errAssertion: assertErrorAs(&errZipSlipDetected{}),
+            errAssertion: assertErrorAs(&zipSlipDetectedError{}),
         },
         {
             prefix: "/a/place",
@@ -308,7 +308,7 @@ func TestSafeJoin(t *testing.T) {
                 "../",
             },
             expected:     "",
-            errAssertion: assertErrorAs(&errZipSlipDetected{}),
+            errAssertion: assertErrorAs(&zipSlipDetectedError{}),
         },
         {
             prefix: "/a/place",
@@ -316,7 +316,7 @@ func TestSafeJoin(t *testing.T) {
                 "../",
             },
             expected:     "",
-            errAssertion: assertErrorAs(&errZipSlipDetected{}),
+            errAssertion: assertErrorAs(&zipSlipDetectedError{}),
         },
     }
 

@@ -3,6 +3,7 @@ package file
 import (
     "archive/zip"
     "encoding/binary"
+    "errors"
     "fmt"
     "io"
     "os"
@@ -91,7 +92,7 @@ func findArchiveStartOffset(r io.ReaderAt, size int64) (startOfArchive uint64, e
         bLen = size
     }
     buf = make([]byte, int(bLen))
-    if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF {
+    if _, err := r.ReadAt(buf, size-bLen); err != nil && !errors.Is(err, io.EOF) {
         return 0, err
     }
     if p := findSignatureInBlock(buf); p >= 0 {
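
Note: swapping `err != io.EOF` for `!errors.Is(err, io.EOF)` is the standard fix for sentinel comparisons: `==`/`!=` break as soon as any layer wraps the sentinel with `%w`, while `errors.Is` unwraps. A minimal demonstration (not syft code):

    package main

    import (
        "errors"
        "fmt"
        "io"
    )

    func main() {
        wrapped := fmt.Errorf("read failed: %w", io.EOF)

        fmt.Println(wrapped == io.EOF)          // false: the wrapper is a different value
        fmt.Println(errors.Is(wrapped, io.EOF)) // true: errors.Is walks the chain
    }
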
@@ -11,7 +11,7 @@ import (
     prefixed "github.com/x-cray/logrus-prefixed-formatter"
 )
 
-const defaultLogFilePermissions fs.FileMode = 0644
+const defaultLogFilePermissions fs.FileMode = 0o644
 
 // LogrusConfig contains all configurable values for the Logrus logger
 type LogrusConfig struct {
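
Note: `0644` and `0o644` are the same octal literal; the `0o` prefix (Go 1.13+) just makes the base explicit, which is what the formatting-style checks enforce here. For instance:

    package main

    import "fmt"

    func main() {
        // Both spellings denote decimal 420; only the notation differs.
        fmt.Println(0644 == 0o644) // true
    }
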
@@ -38,18 +38,18 @@ func NewJSONPackages(catalog *pkg.Catalog) ([]JSONPackage, error) {
 
 // NewJSONPackage crates a new JSONPackage from the given pkg.Package.
 func NewJSONPackage(p *pkg.Package) (JSONPackage, error) {
-    var cpes = make([]string, len(p.CPEs))
+    cpes := make([]string, len(p.CPEs))
     for i, c := range p.CPEs {
         cpes[i] = c.BindToFmtString()
     }
 
     // ensure collections are never nil for presentation reasons
-    var locations = make([]source.Location, 0)
+    locations := make([]source.Location, 0)
     if p.Locations != nil {
         locations = p.Locations
     }
 
-    var licenses = make([]string, 0)
+    licenses := make([]string, 0)
     if p.Licenses != nil {
         licenses = p.Licenses
     }

@@ -135,7 +135,7 @@ func getSPDXHomepage(p *pkg.Package) string {
 }
 
 func getSPDXSourceInfo(p *pkg.Package) string {
-    answer := ""
+    var answer string
     switch p.Type {
     case pkg.RpmPkg:
         answer = "acquired package info from RPM DB"

@@ -132,10 +132,10 @@ func newSPDXJsonElements(catalog *pkg.Catalog) ([]spdx22.Package, []spdx22.File,
 
 func cleanSPDXName(name string) string {
     // remove # according to specification
-    name = strings.Replace(name, "#", "-", -1)
+    name = strings.ReplaceAll(name, "#", "-")
 
     // remove : for url construction
-    name = strings.Replace(name, ":", "-", -1)
+    name = strings.ReplaceAll(name, ":", "-")
 
     // clean relative pathing
     return path.Clean(name)
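
Note: `strings.ReplaceAll(s, old, new)` is defined as `strings.Replace(s, old, new, -1)`; the rewrite drops the magic `-1` without changing behavior. For example:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        name := "pkg:name#frag"
        a := strings.Replace(name, "#", "-", -1) // n < 0 means replace all occurrences
        b := strings.ReplaceAll(name, "#", "-")  // same result, clearer intent
        fmt.Println(a == b, b)                   // true pkg:name-frag
    }
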
@@ -5,9 +5,8 @@ import (
     "io"
     "time"
 
-    "github.com/anchore/syft/internal/spdxlicense"
-
     "github.com/anchore/syft/internal"
+    "github.com/anchore/syft/internal/spdxlicense"
     "github.com/anchore/syft/internal/version"
     "github.com/anchore/syft/syft/pkg"
     "github.com/anchore/syft/syft/source"

@@ -6,9 +6,8 @@ import (
     "sort"
     "strings"
 
-    "github.com/olekukonko/tablewriter"
-
     "github.com/anchore/syft/syft/pkg"
+    "github.com/olekukonko/tablewriter"
 )
 
 type TablePresenter struct {

@@ -2,7 +2,6 @@ package packages
 
 import (
     "fmt"
-
     "io"
     "text/tabwriter"
 
@@ -32,7 +31,7 @@ func (pres *TextPresenter) Present(output io.Writer) error {
 
     switch pres.srcMetadata.Scheme {
     case source.DirectoryScheme:
-        fmt.Fprintln(w, fmt.Sprintf("[Path: %s]", pres.srcMetadata.Path))
+        fmt.Fprintf(w, "[Path: %s]\n", pres.srcMetadata.Path)
     case source.ImageScheme:
         fmt.Fprintln(w, "[Image]")
 
@@ -51,7 +50,7 @@ func (pres *TextPresenter) Present(output io.Writer) error {
     // populate artifacts...
     rows := 0
     for _, p := range pres.catalog.Sorted() {
-        fmt.Fprintln(w, fmt.Sprintf("[%s]", p.Name))
+        fmt.Fprintf(w, "[%s]\n", p.Name)
         fmt.Fprintln(w, " Version:\t", p.Version)
         fmt.Fprintln(w, " Type:\t", string(p.Type))
         fmt.Fprintln(w, " Found by:\t", p.FoundBy)

@@ -6,7 +6,6 @@ import (
     "strconv"
 
     "github.com/anchore/syft/syft/file"
-
     "github.com/anchore/syft/syft/source"
 )
 

@@ -2,6 +2,7 @@ package main
 
 import (
     "encoding/json"
+    "fmt"
     "log"
     "net/http"
     "os"
@@ -49,14 +50,20 @@ type LicenseList struct {
 }
 
 func main() {
+    if err := run(); err != nil {
+        os.Exit(1)
+    }
+}
+
+func run() error {
     resp, err := http.Get(url)
     if err != nil {
-        log.Fatalf("unable to get licenses list: %+v", err)
+        return fmt.Errorf("unable to get licenses list: %w", err)
     }
 
     var result LicenseList
     if err = json.NewDecoder(resp.Body).Decode(&result); err != nil {
-        log.Fatalf("unable to decode license list: %+v", err)
+        return fmt.Errorf("unable to decode license list: %w", err)
     }
     defer func() {
         if err := resp.Body.Close(); err != nil {
@@ -66,7 +73,7 @@ func main() {
 
     f, err := os.Create(source)
     if err != nil {
-        log.Fatalf("unable to create %q: %+v", source, err)
+        return fmt.Errorf("unable to create %q: %w", source, err)
     }
     defer func() {
         if err := f.Close(); err != nil {
@@ -89,8 +96,9 @@ func main() {
     })
 
     if err != nil {
-        log.Fatalf("unable to generate template: %+v", err)
+        return fmt.Errorf("unable to generate template: %w", err)
     }
+    return nil
 }
 
 // Parsing the provided SPDX license list necessitates a two pass approach.
@@ -104,7 +112,7 @@ func main() {
 // We also sort the licenses for the second pass so that cases like `GPL-1` associate to `GPL-1.0` and not `GPL-1.1`.
 func processSPDXLicense(result LicenseList) map[string]string {
     // first pass build map
-    var licenseIDs = make(map[string]string)
+    licenseIDs := make(map[string]string)
     for _, l := range result.Licenses {
         cleanID := strings.ToLower(l.ID)
         if _, exists := licenseIDs[cleanID]; exists {
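
Note: the `main` → `run() error` refactor above is more than cosmetics: `log.Fatalf` calls `os.Exit`, which skips deferred calls such as the `resp.Body.Close()` registered a few lines later, whereas returning an error lets every `defer` run before `main` decides the exit code. The shape, reduced to a sketch:

    package main

    import (
        "fmt"
        "os"
    )

    func run() error {
        f, err := os.CreateTemp("", "example")
        if err != nil {
            return fmt.Errorf("unable to create temp file: %w", err)
        }
        // These defers run on every return path; under log.Fatalf they would be skipped.
        defer os.Remove(f.Name())
        defer f.Close()

        _, err = f.WriteString("hello")
        return err
    }

    func main() {
        if err := run(); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }
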
@@ -7,11 +7,10 @@ import (
     "sync"
 
     "github.com/anchore/syft/internal"
-    "github.com/gookit/color"
-    "github.com/wagoodman/jotframe/pkg/frame"
-
     syftEventParsers "github.com/anchore/syft/syft/event/parsers"
+    "github.com/gookit/color"
     "github.com/wagoodman/go-partybus"
+    "github.com/wagoodman/jotframe/pkg/frame"
 )
 
 // handleCatalogerPresenterReady is a UI function for processing the CatalogerFinished bus event, displaying the catalog

@@ -12,11 +12,13 @@ import (
 const valueNotProvided = "[not provided]"
 
 // all variables here are provided as build-time arguments, with clear default values
-var version = valueNotProvided
-var gitCommit = valueNotProvided
-var gitTreeState = valueNotProvided
-var buildDate = valueNotProvided
-var platform = fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)
+var (
+    version      = valueNotProvided
+    gitCommit    = valueNotProvided
+    gitTreeState = valueNotProvided
+    buildDate    = valueNotProvided
+    platform     = fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)
+)
 
 // Version defines the application version details (generally from build information)
 type Version struct {

@@ -1,3 +1,4 @@
+// nolint:forbidigo
 package main
 
 import (
@@ -45,11 +46,7 @@ func build() *jsonschema.Schema {
     reflector := &jsonschema.Reflector{
         AllowAdditionalProperties: true,
         TypeNamer: func(r reflect.Type) string {
-            name := r.Name()
-            if strings.HasPrefix(name, "JSON") {
-                name = strings.TrimPrefix(name, "JSON")
-            }
-            return name
+            return strings.TrimPrefix(r.Name(), "JSON")
         },
     }
     documentSchema := reflector.ReflectFromType(reflect.TypeOf(&poweruser.JSONDocument{}))
@@ -74,7 +71,7 @@ func build() *jsonschema.Schema {
     // ensure the generated list of names is stable between runs
     sort.Strings(metadataNames)
 
-    var metadataTypes = []map[string]string{
+    metadataTypes := []map[string]string{
         // allow for no metadata to be provided
         {"type": "null"},
     }
@@ -93,13 +90,13 @@ func build() *jsonschema.Schema {
 }
 
 func encode(schema *jsonschema.Schema) []byte {
-    var newSchemaBuffer = new(bytes.Buffer)
+    newSchemaBuffer := new(bytes.Buffer)
     enc := json.NewEncoder(newSchemaBuffer)
     // prevent > and < from being escaped in the payload
     enc.SetEscapeHTML(false)
     enc.SetIndent("", " ")
-    err := enc.Encode(&schema)
-    if err != nil {
+    if err := enc.Encode(&schema); err != nil {
         panic(err)
     }
 

@@ -6,7 +6,6 @@ import (
     "strings"
 
     "github.com/anchore/syft/internal"
-
     "github.com/anchore/syft/internal/log"
     "github.com/anchore/syft/syft/source"
 )

@@ -6,29 +6,26 @@ package parsers
 import (
     "fmt"
 
-    "github.com/anchore/syft/syft/presenter"
-
-    "github.com/anchore/syft/syft/file"
-
-    "github.com/wagoodman/go-progress"
-
     "github.com/anchore/syft/syft/event"
+    "github.com/anchore/syft/syft/file"
     "github.com/anchore/syft/syft/pkg/cataloger"
+    "github.com/anchore/syft/syft/presenter"
     "github.com/wagoodman/go-partybus"
+    "github.com/wagoodman/go-progress"
 )
 
-type ErrBadPayload struct {
+type badPayloadError struct {
     Type  partybus.EventType
     Field string
     Value interface{}
 }
 
-func (e *ErrBadPayload) Error() string {
+func (e *badPayloadError) Error() string {
     return fmt.Sprintf("event='%s' has bad event payload field='%v': '%+v'", string(e.Type), e.Field, e.Value)
 }
 
 func newPayloadErr(t partybus.EventType, field string, value interface{}) error {
-    return &ErrBadPayload{
+    return &badPayloadError{
         Type:  t,
         Field: field,
         Value: value,

@@ -6,7 +6,6 @@ import (
     "io"
 
     "github.com/anchore/syft/internal"
-
     "github.com/anchore/syft/internal/log"
     "github.com/anchore/syft/syft/source"
 )
@@ -65,7 +64,7 @@ func (i *ContentsCataloger) catalogLocation(resolver source.FileResolver, locati
 
     buf := &bytes.Buffer{}
     if _, err = io.Copy(base64.NewEncoder(base64.StdEncoding, buf), contentReader); err != nil {
-        return "", internal.ErrPath{Path: location.RealPath, Err: err}
+        return "", internal.PathError{Path: location.RealPath, Err: err}
     }
 
     return buf.String(), nil

@@ -8,15 +8,12 @@ import (
     "strings"
 
     "github.com/anchore/syft/internal"
-
-    "github.com/anchore/syft/internal/log"
-
     "github.com/anchore/syft/internal/bus"
+    "github.com/anchore/syft/internal/log"
     "github.com/anchore/syft/syft/event"
+    "github.com/anchore/syft/syft/source"
     "github.com/wagoodman/go-partybus"
     "github.com/wagoodman/go-progress"
-
-    "github.com/anchore/syft/syft/source"
 )
 
 type DigestsCataloger struct {
@@ -72,7 +69,7 @@ func (i *DigestsCataloger) catalogLocation(resolver source.FileResolver, locatio
 
     size, err := io.Copy(io.MultiWriter(writers...), contentReader)
     if err != nil {
-        return nil, internal.ErrPath{Path: location.RealPath, Err: err}
+        return nil, internal.PathError{Path: location.RealPath, Err: err}
     }
 
     if size == 0 {
@@ -99,7 +96,7 @@ func DigestAlgorithmName(hash crypto.Hash) string {
 
 func CleanDigestAlgorithmName(name string) string {
     lower := strings.ToLower(name)
-    return strings.Replace(lower, "-", "", -1)
+    return strings.ReplaceAll(lower, "-", "")
 }
 
 func digestsCatalogingProgress(locations int64) (*progress.Stage, *progress.Manual) {

@@ -11,7 +11,7 @@ import (
 // GenerateSearchPatterns takes a set of named base patterns, a set of additional named patterns and an name exclusion list and generates a final
 // set of regular expressions (indexed by name). The sets are aggregated roughly as such: (base - excluded) + additional.
 func GenerateSearchPatterns(basePatterns map[string]string, additionalPatterns map[string]string, excludePatternNames []string) (map[string]*regexp.Regexp, error) {
-    var regexObjs = make(map[string]*regexp.Regexp)
+    regexObjs := make(map[string]*regexp.Regexp)
     var errs error
 
     addFn := func(name, pattern string) {

@@ -9,8 +9,7 @@ import (
     "github.com/wagoodman/go-progress"
 )
 
-type MetadataCataloger struct {
-}
+type MetadataCataloger struct{}
 
 func NewMetadataCataloger() *MetadataCataloger {
     return &MetadataCataloger{}

@@ -9,7 +9,6 @@ import (
     "sort"
 
     "github.com/anchore/syft/internal"
-
     "github.com/anchore/syft/internal/bus"
     "github.com/anchore/syft/internal/log"
     "github.com/anchore/syft/syft/event"
@@ -82,7 +81,7 @@ func (i *SecretsCataloger) catalogLocation(resolver source.FileResolver, locatio
     // TODO: in the future we can swap out search strategies here
     secrets, err := catalogLocationByLine(resolver, location, i.patterns)
     if err != nil {
-        return nil, internal.ErrPath{Path: location.RealPath, Err: err}
+        return nil, internal.PathError{Path: location.RealPath, Err: err}
     }
 
     if i.revealValues {

@@ -9,7 +9,6 @@ import (
     "regexp"
 
     "github.com/anchore/syft/internal"
-
     "github.com/anchore/syft/syft/source"
 )
 
@@ -20,7 +19,7 @@ func catalogLocationByLine(resolver source.FileResolver, location source.Locatio
     }
     defer internal.CloseAndLogError(readCloser, location.VirtualPath)
 
-    var scanner = bufio.NewReader(readCloser)
+    scanner := bufio.NewReader(readCloser)
     var position int64
     var allSecrets []SearchResult
     var lineNo int64
@@ -30,7 +29,7 @@ func catalogLocationByLine(resolver source.FileResolver, location source.Locatio
         var line []byte
         // TODO: we're at risk of large memory usage for very long lines
         line, readErr = scanner.ReadBytes('\n')
-        if readErr != nil && readErr != io.EOF {
+        if readErr != nil && !errors.Is(readErr, io.EOF) {
             return nil, readErr
         }
 
@@ -119,9 +118,9 @@ func extractSecretFromPosition(readCloser io.ReadCloser, name string, pattern *r
     }
 
     // lineNoOfSecret are the number of lines which occur before the start of the secret value
-    var lineNoOfSecret = lineNo + int64(reader.newlinesBefore(start))
+    lineNoOfSecret := lineNo + int64(reader.newlinesBefore(start))
     // lineOffsetOfSecret are the number of bytes that occur after the last newline but before the secret value.
-    var lineOffsetOfSecret = start - reader.newlinePositionBefore(start)
+    lineOffsetOfSecret := start - reader.newlinePositionBefore(start)
     if lineNoOfSecret == lineNo {
         // the secret value starts in the same line as the overall match, so we must consider that line offset
         lineOffsetOfSecret += lineOffset

@@ -3,13 +3,12 @@ package pkg
 import (
     "sort"
 
-    "github.com/anchore/syft/syft/file"
-
     "github.com/anchore/packageurl-go"
+    "github.com/anchore/syft/syft/file"
     "github.com/scylladb/go-set/strset"
 )
 
-const ApkDbGlob = "**/lib/apk/db/installed"
+const ApkDBGlob = "**/lib/apk/db/installed"
 
 var _ FileOwner = (*ApkMetadata)(nil)
 

@@ -5,7 +5,6 @@ import (
     "sync"
 
     "github.com/anchore/syft/internal"
-
     "github.com/anchore/syft/internal/log"
 )
 
@@ -67,8 +66,7 @@ func (c *Catalog) Add(p Package) {
     c.lock.Lock()
     defer c.lock.Unlock()
 
-    _, exists := c.byID[p.ID]
-    if exists {
+    if _, exists := c.byID[p.ID]; exists {
         log.Errorf("package ID already exists in the catalog : id=%+v %+v", p.ID, p)
         return
     }
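
Note: folding the map lookup into the `if` statement is the usual Go scoping fix: `exists` (and the blank-assigned value) now live only inside the conditional instead of leaking into the rest of the function. Schematically (hypothetical map, not syft's catalog):

    package main

    import "fmt"

    func main() {
        seen := map[string]bool{"a": true}

        // The init-statement form keeps `exists` scoped to this if block.
        if _, exists := seen["a"]; exists {
            fmt.Println("duplicate id")
        }
        // `exists` is out of scope here, so it cannot be misused later.
    }
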
@@ -11,7 +11,7 @@ import (
 // NewApkdbCataloger returns a new Alpine DB cataloger object.
 func NewApkdbCataloger() *common.GenericCataloger {
     globParsers := map[string]common.ParserFn{
-        pkg.ApkDbGlob: parseApkDB,
+        pkg.ApkDBGlob: parseApkDB,
     }
 
     return common.NewGenericCataloger(nil, globParsers, "apkdb-cataloger")

@@ -8,9 +8,8 @@ import (
     "strconv"
     "strings"
 
-    "github.com/anchore/syft/syft/file"
-
     "github.com/anchore/syft/internal/log"
+    "github.com/anchore/syft/syft/file"
     "github.com/anchore/syft/syft/pkg"
     "github.com/anchore/syft/syft/pkg/cataloger/common"
     "github.com/mitchellh/mapstructure"

@@ -66,7 +66,7 @@ func (c *GenericCataloger) Catalog(resolver source.FileResolver) ([]pkg.Package,
 
 // SelectFiles takes a set of file trees and resolves and file references of interest for future cataloging
 func (c *GenericCataloger) selectFiles(resolver source.FilePathResolver) map[source.Location]ParserFn {
-    var parserByLocation = make(map[source.Location]ParserFn)
+    parserByLocation := make(map[source.Location]ParserFn)
 
     // select by exact path
     for path, parser := range c.pathParsers {

@@ -11,7 +11,6 @@ import (
     "sort"
 
     "github.com/anchore/syft/internal"
-
     "github.com/anchore/syft/internal/log"
     "github.com/anchore/syft/syft/pkg"
     "github.com/anchore/syft/syft/source"
@@ -36,9 +35,9 @@ func (c *Cataloger) Name() string {
 }
 
 // Catalog is given an object to resolve file references and content, this function returns any discovered Packages after analyzing dpkg support files.
-// nolint:funlen
 func (c *Cataloger) Catalog(resolver source.FileResolver) ([]pkg.Package, error) {
-    dbFileMatches, err := resolver.FilesByGlob(pkg.DpkgDbGlob)
+    dbFileMatches, err := resolver.FilesByGlob(pkg.DpkgDBGlob)
     if err != nil {
         return nil, fmt.Errorf("failed to find dpkg status files's by glob: %w", err)
     }
@@ -80,20 +79,21 @@ func addLicenses(resolver source.FileResolver, dbLocation source.Location, p *pk
     // get license information from the copyright file
     copyrightReader, copyrightLocation := fetchCopyrightContents(resolver, dbLocation, p)
 
-    if copyrightReader != nil {
+    if copyrightReader != nil && copyrightLocation != nil {
         defer internal.CloseAndLogError(copyrightReader, copyrightLocation.VirtualPath)
         // attach the licenses
         p.Licenses = parseLicensesFromCopyright(copyrightReader)
 
         // keep a record of the file where this was discovered
-        if copyrightLocation != nil {
-            p.Locations = append(p.Locations, *copyrightLocation)
-        }
+        p.Locations = append(p.Locations, *copyrightLocation)
     }
 }
 
 func mergeFileListing(resolver source.FileResolver, dbLocation source.Location, p *pkg.Package) {
-    metadata := p.Metadata.(pkg.DpkgMetadata)
+    metadata, ok := p.Metadata.(pkg.DpkgMetadata)
+    if !ok {
+        log.Warnf("unable to get DPKG metadata while merging file info")
+    }
 
     // get file listing (package files + additional config files)
     files, infoLocations := getAdditionalFileListing(resolver, dbLocation, p)
@@ -122,34 +122,30 @@ loopNewFiles:
 
 func getAdditionalFileListing(resolver source.FileResolver, dbLocation source.Location, p *pkg.Package) ([]pkg.DpkgFileRecord, []source.Location) {
     // ensure the default value for a collection is never nil since this may be shown as JSON
-    var files = make([]pkg.DpkgFileRecord, 0)
+    files := make([]pkg.DpkgFileRecord, 0)
     var locations []source.Location
 
     md5Reader, md5Location := fetchMd5Contents(resolver, dbLocation, p)
 
-    if md5Reader != nil {
+    if md5Reader != nil && md5Location != nil {
         defer internal.CloseAndLogError(md5Reader, md5Location.VirtualPath)
         // attach the file list
         files = append(files, parseDpkgMD5Info(md5Reader)...)
 
         // keep a record of the file where this was discovered
-        if md5Location != nil {
-            locations = append(locations, *md5Location)
-        }
+        locations = append(locations, *md5Location)
     }
 
     conffilesReader, conffilesLocation := fetchConffileContents(resolver, dbLocation, p)
 
-    if conffilesReader != nil {
+    if conffilesReader != nil && conffilesLocation != nil {
         defer internal.CloseAndLogError(conffilesReader, conffilesLocation.VirtualPath)
         // attach the file list
         files = append(files, parseDpkgConffileInfo(md5Reader)...)
 
         // keep a record of the file where this was discovered
-        if conffilesLocation != nil {
-            locations = append(locations, *conffilesLocation)
-        }
+        locations = append(locations, *conffilesLocation)
     }
 
     return files, locations
 }
@@ -228,7 +224,10 @@ func fetchCopyrightContents(resolver source.FileResolver, dbLocation source.Loca
 }
 
 func md5Key(p *pkg.Package) string {
-    metadata := p.Metadata.(pkg.DpkgMetadata)
+    metadata, ok := p.Metadata.(pkg.DpkgMetadata)
+    if !ok {
+        log.Warnf("unable to get DPKG metadata while fetching md5 key")
+    }
 
     contentKey := p.Name
     if metadata.Architecture != "" && metadata.Architecture != "all" {
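
Note: the `metadata, ok := p.Metadata.(pkg.DpkgMetadata)` rewrites replace single-result type assertions, which panic on a mismatch, with the comma-ok form that reports failure as a boolean (here only logged, so execution continues with a zero value). In isolation (hypothetical types, not syft's):

    package main

    import "fmt"

    type dpkgMetadata struct{ architecture string }

    func main() {
        var payload interface{} = "not metadata"

        // The single-result form payload.(dpkgMetadata) would panic here;
        // the comma-ok form leaves metadata as the zero value instead.
        metadata, ok := payload.(dpkgMetadata)
        if !ok {
            fmt.Println("unable to get metadata; using zero value")
        }
        fmt.Printf("architecture=%q\n", metadata.architecture) // architecture=""
    }
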
@ -10,7 +10,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/anchore/syft/internal"
|
"github.com/anchore/syft/internal"
|
||||||
|
|
||||||
"github.com/anchore/syft/syft/pkg"
|
"github.com/anchore/syft/syft/pkg"
|
||||||
"github.com/mitchellh/mapstructure"
|
"github.com/mitchellh/mapstructure"
|
||||||
)
|
)
|
||||||
@ -23,7 +22,7 @@ var (
|
|||||||
// parseDpkgStatus is a parser function for Debian DB status contents, returning all Debian packages listed.
|
// parseDpkgStatus is a parser function for Debian DB status contents, returning all Debian packages listed.
|
||||||
func parseDpkgStatus(reader io.Reader) ([]pkg.Package, error) {
|
func parseDpkgStatus(reader io.Reader) ([]pkg.Package, error) {
|
||||||
buffedReader := bufio.NewReader(reader)
|
buffedReader := bufio.NewReader(reader)
|
||||||
var packages = make([]pkg.Package, 0)
|
packages := make([]pkg.Package, 0)
|
||||||
|
|
||||||
continueProcessing := true
|
continueProcessing := true
|
||||||
for continueProcessing {
|
for continueProcessing {
|
||||||
@ -152,7 +151,7 @@ func extractSourceVersion(source string) (string, string) {
|
|||||||
// handleNewKeyValue parse a new key-value pair from the given unprocessed line
|
// handleNewKeyValue parse a new key-value pair from the given unprocessed line
|
||||||
func handleNewKeyValue(line string) (string, interface{}, error) {
|
func handleNewKeyValue(line string) (string, interface{}, error) {
|
||||||
if i := strings.Index(line, ":"); i > 0 {
|
if i := strings.Index(line, ":"); i > 0 {
|
||||||
var key = strings.TrimSpace(line[0:i])
|
key := strings.TrimSpace(line[0:i])
|
||||||
// mapstruct cant handle "-"
|
// mapstruct cant handle "-"
|
||||||
key = strings.ReplaceAll(key, "-", "")
|
key = strings.ReplaceAll(key, "-", "")
|
||||||
val := strings.TrimSpace(line[i+1:])
|
val := strings.TrimSpace(line[i+1:])
|
||||||
|
|||||||
@ -6,7 +6,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/anchore/syft/internal/log"
|
"github.com/anchore/syft/internal/log"
|
||||||
|
|
||||||
"github.com/anchore/syft/syft/pkg"
|
"github.com/anchore/syft/syft/pkg"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@ -6,9 +6,8 @@ import (
|
|||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/anchore/syft/internal/log"
|
|
||||||
|
|
||||||
"github.com/anchore/syft/internal/file"
|
"github.com/anchore/syft/internal/file"
|
||||||
|
"github.com/anchore/syft/internal/log"
|
||||||
"github.com/anchore/syft/syft/pkg"
|
"github.com/anchore/syft/syft/pkg"
|
||||||
"github.com/anchore/syft/syft/pkg/cataloger/common"
|
"github.com/anchore/syft/syft/pkg/cataloger/common"
|
||||||
)
|
)
|
||||||
@@ -81,7 +80,7 @@ func newJavaArchiveParser(virtualPath string, reader io.Reader, detectNested boo
 
 // parse the loaded archive and return all packages found.
 func (j *archiveParser) parse() ([]pkg.Package, error) {
-    var pkgs = make([]pkg.Package, 0)
+    pkgs := make([]pkg.Package, 0)
 
     // find the parent package from the java manifest
     parentPkg, err := j.discoverMainPackage()

@@ -190,7 +189,7 @@ func (j *archiveParser) discoverPkgsFromAllMavenFiles(parentPkg *pkg.Package) ([
 // discoverPkgsFromNestedArchives finds Java archives within Java archives, returning all listed Java packages found and
 // associating each discovered package to the given parent package.
 func (j *archiveParser) discoverPkgsFromNestedArchives(parentPkg *pkg.Package) ([]pkg.Package, error) {
-    var pkgs = make([]pkg.Package, 0)
+    pkgs := make([]pkg.Package, 0)
 
     // search and parse pom.properties files & fetch the contents
     openers, err := file.ExtractFromZipToUniqueTempFile(j.archivePath, j.contentPath, j.fileManifest.GlobMatch(archiveFormatGlobs...)...)

@@ -326,7 +325,10 @@ func packageIdentitiesMatch(p pkg.Package, parentPkg *pkg.Package) bool {
         return true
     }
 
-    metadata := p.Metadata.(pkg.JavaMetadata)
+    metadata, ok := p.Metadata.(pkg.JavaMetadata)
+    if !ok {
+        log.Warnf("unable to get java metadata while determining package identities")
+    }
 
     // the virtual path matches...
     if parentPkg.Metadata.(pkg.JavaMetadata).VirtualPath == metadata.VirtualPath {
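Note: the hunk above is more than style. A bare type assertion like p.Metadata.(pkg.JavaMetadata) panics at runtime if the value holds a different type; the comma-ok form turns that crash into a checkable condition (here the patched code logs a warning and carries on with the zero value). A self-contained sketch of the difference, with made-up names:

package main

import "fmt"

func describe(v interface{}) {
    // A bare assertion, s := v.(string), would panic when v is not a string.
    // The comma-ok form reports the mismatch instead of panicking.
    s, ok := v.(string)
    if !ok {
        fmt.Println("not a string:", v)
        return
    }
    fmt.Println("string value:", s)
}

func main() {
    describe("hello") // string value: hello
    describe(42)      // not a string: 42
}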
@@ -8,7 +8,6 @@ import (
     "strings"
 
     "github.com/anchore/syft/internal/log"
-
     "github.com/anchore/syft/syft/pkg"
 )
 

@@ -26,7 +26,7 @@ func saveArchiveToTmp(reader io.Reader) (string, string, func(), error) {
     archivePath := filepath.Join(tempDir, "archive")
     contentDir := filepath.Join(tempDir, "contents")
 
-    err = os.Mkdir(contentDir, 0755)
+    err = os.Mkdir(contentDir, 0o755)
     if err != nil {
         return contentDir, "", cleanupFn, fmt.Errorf("unable to create processing tempdir: %w", err)
     }
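Note: 0755 to 0o755 is a pure spelling change. Go 1.13 introduced the 0o prefix for octal literals, and gofumpt rewrites the legacy leading-zero form to it; the value is identical. A quick check:

package main

import "fmt"

func main() {
    // Both literals denote the same file-permission value; the 0o prefix
    // just makes the octal base explicit.
    legacy := 0755
    modern := 0o755
    fmt.Println(legacy == modern) // true
    fmt.Printf("%o\n", modern)    // 755
}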
@@ -7,14 +7,11 @@ import (
     "io"
     "regexp"
 
-    "github.com/anchore/syft/internal/log"
-
     "github.com/anchore/syft/internal"
-    "github.com/mitchellh/mapstructure"
-
+    "github.com/anchore/syft/internal/log"
     "github.com/anchore/syft/syft/pkg"
     "github.com/anchore/syft/syft/pkg/cataloger/common"
+    "github.com/mitchellh/mapstructure"
 )
 
 // integrity check

@@ -168,7 +165,7 @@ func parsePackageJSON(_ string, reader io.Reader) ([]pkg.Package, error) {
 
     for {
         var p PackageJSON
-        if err := dec.Decode(&p); err == io.EOF {
+        if err := dec.Decode(&p); errors.Is(err, io.EOF) {
             break
         } else if err != nil {
             return nil, fmt.Errorf("failed to parse package.json file: %w", err)
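Note: every err == io.EOF comparison in this commit becomes errors.Is(err, io.EOF), the fix that errorlint-style checks ask for. Direct equality breaks as soon as an error is wrapped with %w; errors.Is walks the wrap chain. A runnable sketch:

package main

import (
    "errors"
    "fmt"
    "io"
)

func main() {
    // Once io.EOF is wrapped, equality no longer matches it...
    err := fmt.Errorf("reading stream: %w", io.EOF)
    fmt.Println(err == io.EOF) // false
    // ...but errors.Is unwraps the chain and still finds it.
    fmt.Println(errors.Is(err, io.EOF)) // true
}

In the decode loops touched here, json.Decoder currently returns io.EOF unwrapped, so the old comparison happened to work; errors.Is is simply the form that stays correct if that ever changes.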
@@ -2,6 +2,7 @@ package javascript
 
 import (
     "encoding/json"
+    "errors"
     "fmt"
     "io"
 

@@ -40,7 +41,7 @@ func parsePackageLock(path string, reader io.Reader) ([]pkg.Package, error) {
 
     for {
         var lock PackageLock
-        if err := dec.Decode(&lock); err == io.EOF {
+        if err := dec.Decode(&lock); errors.Is(err, io.EOF) {
             break
         } else if err != nil {
             return nil, fmt.Errorf("failed to parse package-lock.json file: %w", err)

@@ -20,9 +20,9 @@ func generatePackageURL(p pkg.Package, d *distro.Distro) string {
         }
     }
 
-    var purlType = p.Type.PackageURLType()
-    var name = p.Name
-    var namespace = ""
+    purlType := p.Type.PackageURLType()
+    name := p.Name
+    namespace := ""
 
     switch {
     case purlType == "":

@@ -6,9 +6,7 @@ import (
     "path/filepath"
 
     "github.com/anchore/syft/internal"
-
     "github.com/anchore/syft/syft/pkg"
-
     "github.com/anchore/syft/syft/source"
 )
 

@@ -149,7 +147,7 @@ func (c *PackageCataloger) fetchTopLevelPackages(resolver source.FileResolver, m
 
 // assembleEggOrWheelMetadata discovers and accumulates python package metadata from multiple file sources and returns a single metadata object as well as a list of files where the metadata was derived from.
 func (c *PackageCataloger) assembleEggOrWheelMetadata(resolver source.FileResolver, metadataLocation source.Location) (*pkg.PythonPackageMetadata, []source.Location, error) {
-    var sources = []source.Location{metadataLocation}
+    sources := []source.Location{metadataLocation}
 
     metadataContents, err := resolver.FileContentsByLocation(metadataLocation)
     if err != nil {

@@ -2,6 +2,7 @@ package python
 
 import (
     "encoding/json"
+    "errors"
     "fmt"
     "io"
     "strings"

@@ -43,7 +44,7 @@ func parsePipfileLock(_ string, reader io.Reader) ([]pkg.Package, error) {
 
     for {
         var lock PipfileLock
-        if err := dec.Decode(&lock); err == io.EOF {
+        if err := dec.Decode(&lock); errors.Is(err, io.EOF) {
             break
         } else if err != nil {
             return nil, fmt.Errorf("failed to parse Pipfile.lock file: %w", err)

@@ -16,13 +16,13 @@ var _ common.ParserFn = parsePoetryLock
 func parsePoetryLock(_ string, reader io.Reader) ([]pkg.Package, error) {
     tree, err := toml.LoadReader(reader)
     if err != nil {
-        return nil, fmt.Errorf("unable to load poetry.lock for parsing: %v", err)
+        return nil, fmt.Errorf("unable to load poetry.lock for parsing: %w", err)
     }
 
     metadata := PoetryMetadata{}
     err = tree.Unmarshal(&metadata)
     if err != nil {
-        return nil, fmt.Errorf("unable to parse poetry.lock: %v", err)
+        return nil, fmt.Errorf("unable to parse poetry.lock: %w", err)
     }
 
     return metadata.Pkgs(), nil
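Note: the %v to %w switches in fmt.Errorf are about error wrapping, not formatting. With %w the original error is retained in the wrap chain, so callers can test for it with errors.Is or extract it with errors.As; with %v only its text survives. A small sketch (load and the sentinel error are illustrative, not the real parser):

package main

import (
    "errors"
    "fmt"
    "io"
)

func load() error {
    // %w keeps io.ErrUnexpectedEOF reachable through the returned error.
    return fmt.Errorf("unable to load lock file for parsing: %w", io.ErrUnexpectedEOF)
}

func main() {
    err := load()
    fmt.Println(errors.Is(err, io.ErrUnexpectedEOF)) // true
}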
@@ -9,10 +9,8 @@ import (
 
     "github.com/anchore/syft/internal/file"
     "github.com/anchore/syft/internal/log"
-
-    "github.com/mitchellh/mapstructure"
-
     "github.com/anchore/syft/syft/pkg"
+    "github.com/mitchellh/mapstructure"
 )
 
 // parseWheelOrEggMetadata takes a Python Egg or Wheel (which share the same format and values for our purposes),

@@ -2,6 +2,7 @@ package python
 
 import (
     "encoding/csv"
+    "errors"
     "fmt"
     "io"
     "strings"

@@ -17,7 +18,7 @@ func parseWheelOrEggRecord(reader io.Reader) ([]pkg.PythonFileRecord, error) {
 
     for {
         recordList, err := r.Read()
-        if err == io.EOF {
+        if errors.Is(err, io.EOF) {
             break
         }
         if err != nil {

@@ -7,7 +7,6 @@ import (
     "fmt"
 
     "github.com/anchore/syft/internal"
-
     "github.com/anchore/syft/syft/pkg"
     "github.com/anchore/syft/syft/source"
 )

@@ -28,7 +27,7 @@ func (c *Cataloger) Name() string {
 
 // Catalog is given an object to resolve file references and content, this function returns any discovered Packages after analyzing rpm db installation.
 func (c *Cataloger) Catalog(resolver source.FileResolver) ([]pkg.Package, error) {
-    fileMatches, err := resolver.FilesByGlob(pkg.RpmDbGlob)
+    fileMatches, err := resolver.FilesByGlob(pkg.RpmDBGlob)
     if err != nil {
         return nil, fmt.Errorf("failed to find rpmdb's by glob: %w", err)
     }
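Note: RpmDbGlob becoming RpmDBGlob here (and ApkDBGlob/DpkgDBGlob in the hunks below) follows the standard Go naming convention that initialisms keep a consistent case: DB, ID, URL rather than Db, Id, Url. This is what golint/stylecheck flag. The two renamed constants whose values appear in this commit, as a standalone snippet:

package pkg

// Initialisms stay fully capitalized in Go identifiers, so the linter
// flags RpmDbGlob and friends and the commit renames them.
const (
    RpmDBGlob  = "**/var/lib/rpm/Packages"
    DpkgDBGlob = "**/var/lib/dpkg/{status,status.d/**}"
)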
@@ -6,11 +6,10 @@ import (
     "io/ioutil"
     "os"
 
-    "github.com/anchore/syft/syft/file"
-
     rpmdb "github.com/anchore/go-rpmdb/pkg"
     "github.com/anchore/syft/internal"
     "github.com/anchore/syft/internal/log"
+    "github.com/anchore/syft/syft/file"
     "github.com/anchore/syft/syft/pkg"
     "github.com/anchore/syft/syft/source"
 )

@@ -90,7 +89,7 @@ func toELVersion(metadata pkg.RpmdbMetadata) string {
 }
 
 func extractRpmdbFileRecords(resolver source.FilePathResolver, entry *rpmdb.PackageInfo) []pkg.RpmdbFileRecord {
-    var records = make([]pkg.RpmdbFileRecord, 0)
+    records := make([]pkg.RpmdbFileRecord, 0)
 
     for _, record := range entry.Files {
         // only persist RPMDB file records which exist in the image/directory, otherwise ignore them

@@ -9,11 +9,9 @@ import (
     "strings"
 
     "github.com/anchore/syft/internal"
-
-    "github.com/mitchellh/mapstructure"
-
     "github.com/anchore/syft/syft/pkg"
     "github.com/anchore/syft/syft/pkg/cataloger/common"
+    "github.com/mitchellh/mapstructure"
 )
 
 // integrity check

@@ -62,7 +60,7 @@ func processList(s string) []string {
 
 func parseGemSpecEntries(_ string, reader io.Reader) ([]pkg.Package, error) {
     var pkgs []pkg.Package
-    var fields = make(map[string]interface{})
+    fields := make(map[string]interface{})
     scanner := bufio.NewScanner(reader)
 
     for scanner.Scan() {

@@ -16,13 +16,13 @@ var _ common.ParserFn = parseCargoLock
 func parseCargoLock(_ string, reader io.Reader) ([]pkg.Package, error) {
     tree, err := toml.LoadReader(reader)
     if err != nil {
-        return nil, fmt.Errorf("unable to load Cargo.lock for parsing: %v", err)
+        return nil, fmt.Errorf("unable to load Cargo.lock for parsing: %w", err)
     }
 
     metadata := CargoMetadata{}
     err = tree.Unmarshal(&metadata)
     if err != nil {
-        return nil, fmt.Errorf("unable to parse Cargo.lock: %v", err)
+        return nil, fmt.Errorf("unable to parse Cargo.lock: %w", err)
     }
 
     return metadata.Pkgs(), nil

@@ -3,14 +3,13 @@ package pkg
 import (
     "sort"
 
-    "github.com/anchore/syft/syft/file"
-
     "github.com/anchore/packageurl-go"
     "github.com/anchore/syft/syft/distro"
+    "github.com/anchore/syft/syft/file"
     "github.com/scylladb/go-set/strset"
 )
 
-const DpkgDbGlob = "**/var/lib/dpkg/{status,status.d/**}"
+const DpkgDBGlob = "**/var/lib/dpkg/{status,status.d/**}"
 
 var _ FileOwner = (*DpkgMetadata)(nil)
 

@@ -9,9 +9,9 @@ import (
 var globsForbiddenFromBeingOwned = []string{
     // any OS DBs should automatically be ignored to prevent cyclic issues (e.g. the "rpm" RPM owns the path to the
     // RPM DB, so if not ignored that package would own all other packages on the system).
-    ApkDbGlob,
-    DpkgDbGlob,
-    RpmDbGlob,
+    ApkDBGlob,
+    DpkgDBGlob,
+    RpmDBGlob,
     // DEB packages share common copyright info between, this does not mean that sharing these paths implies ownership.
     "/usr/share/doc/**/copyright",
 }

@@ -21,7 +21,7 @@ type ownershipByFilesMetadata struct {
 }
 
 func ownershipByFilesRelationships(catalog *Catalog) []Relationship {
-    var relationships = findOwnershipByFilesRelationships(catalog)
+    relationships := findOwnershipByFilesRelationships(catalog)
 
     var edges []Relationship
     for parent, children := range relationships {

@@ -43,7 +43,7 @@ func ownershipByFilesRelationships(catalog *Catalog) []Relationship {
 // findOwnershipByFilesRelationships find overlaps in file ownership with a file that defines another package. Specifically, a .Location.Path of
 // a package is found to be owned by another (from the owner's .Metadata.Files[]).
 func findOwnershipByFilesRelationships(catalog *Catalog) map[ID]map[ID]*strset.Set {
-    var relationships = make(map[ID]map[ID]*strset.Set)
+    relationships := make(map[ID]map[ID]*strset.Set)
 
     if catalog == nil {
         return relationships

@@ -5,15 +5,13 @@ import (
     "sort"
     "strconv"
 
-    "github.com/anchore/syft/syft/file"
-
-    "github.com/scylladb/go-set/strset"
-
     "github.com/anchore/packageurl-go"
     "github.com/anchore/syft/syft/distro"
+    "github.com/anchore/syft/syft/file"
+    "github.com/scylladb/go-set/strset"
 )
 
-const RpmDbGlob = "**/var/lib/rpm/Packages"
+const RpmDBGlob = "**/var/lib/rpm/Packages"
 
 var _ FileOwner = (*RpmdbMetadata)(nil)
 

@@ -25,7 +25,7 @@ func newAllLayersResolver(img *image.Image) (*allLayersResolver, error) {
         return nil, fmt.Errorf("the image does not contain any layers")
     }
 
-    var layers = make([]int, 0)
+    layers := make([]int, 0)
     for idx := range img.Layers {
         layers = append(layers, idx)
     }

@@ -120,7 +120,7 @@ func (r *allLayersResolver) FilesByPath(paths ...string) ([]Location, error) {
 }
 
 // FilesByGlob returns all file.References that match the given path glob pattern from any layer in the image.
-// nolint:gocognit
 func (r *allLayersResolver) FilesByGlob(patterns ...string) ([]Location, error) {
     uniqueFileIDs := file.NewFileReferenceSet()
     uniqueLocations := make([]Location, 0)

@@ -204,7 +204,7 @@ func (r directoryResolver) String() string {
 
 // FilesByPath returns all file.References that match the given paths from the directory.
 func (r directoryResolver) FilesByPath(userPaths ...string) ([]Location, error) {
-    var references = make([]Location, 0)
+    references := make([]Location, 0)
 
     for _, userPath := range userPaths {
         userStrPath, err := r.requestPath(userPath)

@@ -4,11 +4,9 @@ import (
     "os"
     "syscall"
 
-    "github.com/anchore/syft/internal/log"
-
     "github.com/anchore/stereoscope/pkg/file"
-
     "github.com/anchore/stereoscope/pkg/image"
+    "github.com/anchore/syft/internal/log"
 )
 
 type FileMetadata struct {

@@ -3,10 +3,9 @@ package source
 import (
     "fmt"
 
-    "github.com/anchore/syft/internal/log"
-
     "github.com/anchore/stereoscope/pkg/file"
     "github.com/anchore/stereoscope/pkg/image"
+    "github.com/anchore/syft/internal/log"
 )
 
 // Location represents a path relative to a particular filesystem resolved to a specific file.Reference. This struct is used as a key

@@ -35,7 +35,7 @@ func NewMockResolverForPaths(paths ...string) *MockResolver {
 
 func NewMockResolverForPathsWithMetadata(metadata map[Location]FileMetadata) *MockResolver {
     var locations []Location
-    var mimeTypeIndex = make(map[string][]Location)
+    mimeTypeIndex := make(map[string][]Location)
     for l, m := range metadata {
         locations = append(locations, l)
         mimeTypeIndex[m.MIMEType] = append(mimeTypeIndex[m.MIMEType], l)

@@ -46,6 +46,7 @@ func detectScheme(fs afero.Fs, imageDetector sourceDetector, userInput string) (
 
     fileMeta, err := fs.Stat(dirLocation)
     if err != nil {
+        // nolint: nilerr
         return UnknownScheme, source, "", nil
     }
 
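Note: the added nolint comment above suppresses the nilerr check, which flags functions that return a nil error from the branch where an error was just detected. Here that is deliberate: a failed stat means the input is simply not a directory scheme, not a fatal condition. A sketch of the pattern (classify and the path are made up for illustration):

package main

import (
    "fmt"
    "os"
)

// classify maps stat failures to an "unknown" label on purpose, which is
// exactly the shape the nilerr linter would otherwise complain about.
func classify(path string) (string, error) {
    if _, err := os.Stat(path); err != nil {
        // nolint: nilerr
        return "unknown", nil
    }
    return "present", nil
}

func main() {
    label, _ := classify("/definitely/not/a/real/path")
    fmt.Println(label) // unknown
}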
@@ -21,11 +21,13 @@ import (
     "github.com/wagoodman/jotframe/pkg/frame"
 )
 
-const maxBarWidth = 50
-const statusSet = components.SpinnerDotSet
-const completedStatus = "✔"
-const tileFormat = color.Bold
-const interval = 150 * time.Millisecond
+const (
+    maxBarWidth     = 50
+    statusSet       = components.SpinnerDotSet
+    completedStatus = "✔"
+    tileFormat      = color.Bold
+    interval        = 150 * time.Millisecond
+)
 
 // StatusTitleColumn is the column index in a given row where status text will be displayed.
 const StatusTitleColumn = 31
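Note: the five single-line const declarations collapse into one parenthesized block. Grouping related constants is the idiomatic form, and gofmt then aligns the values within the group. A compact, runnable sketch with two of the constants:

package main

import (
    "fmt"
    "time"
)

// One grouped block instead of separate "const x = ..." lines.
const (
    maxBarWidth = 50
    interval    = 150 * time.Millisecond
)

func main() {
    fmt.Println(maxBarWidth, interval) // 50 150ms
}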
@@ -184,7 +186,7 @@ func PullDockerImageHandler(ctx context.Context, fr *frame.Frame, event partybus
 }
 
 // FetchImageHandler periodically writes a the image save and write-to-disk process in the form of a progress bar.
-// nolint:dupl
 func FetchImageHandler(ctx context.Context, fr *frame.Frame, event partybus.Event, wg *sync.WaitGroup) error {
     _, prog, err := stereoEventParsers.ParseFetchImage(event)
     if err != nil {

@@ -313,7 +315,7 @@ func PackageCatalogerStartedHandler(ctx context.Context, fr *frame.Frame, event
 }
 
 // SecretsCatalogerStartedHandler shows the intermittent secrets searching progress.
-// nolint:dupl
 func SecretsCatalogerStartedHandler(ctx context.Context, fr *frame.Frame, event partybus.Event, wg *sync.WaitGroup) error {
     prog, err := syftEventParsers.ParseSecretsCatalogingStarted(event)
     if err != nil {

@@ -401,7 +403,7 @@ func FileMetadataCatalogerStartedHandler(ctx context.Context, fr *frame.Frame, e
 }
 
 // FileIndexingStartedHandler shows the intermittent indexing progress from a directory resolver.
-// nolint:dupl
 func FileIndexingStartedHandler(ctx context.Context, fr *frame.Frame, event partybus.Event, wg *sync.WaitGroup) error {
     path, prog, err := syftEventParsers.ParseFileIndexingStarted(event)
     if err != nil {

@@ -487,7 +489,7 @@ func FileDigestsCatalogerStartedHandler(ctx context.Context, fr *frame.Frame, ev
 }
 
 // ImportStartedHandler shows the intermittent upload progress to Anchore Enterprise.
-// nolint:dupl
 func ImportStartedHandler(ctx context.Context, fr *frame.Frame, event partybus.Event, wg *sync.WaitGroup) error {
     host, prog, err := syftEventParsers.ParseImportStarted(event)
     if err != nil {

@@ -16,8 +16,7 @@ import (
 )
 
 // Handler is an aggregated event handler for the set of supported events (PullDockerImage, ReadImage, FetchImage, PackageCatalogerStarted)
-type Handler struct {
-}
+type Handler struct{}
 
 // NewHandler returns an empty Handler
 func NewHandler() *Handler {
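Note: the final hunk collapses an empty two-line struct declaration into type Handler struct{}, the one-line form gofumpt requires for empty declarations. Same type, same zero size; a runnable sketch mirroring the patched shape (main and the print are illustrative):

package main

import "fmt"

// Handler carries no state, so the one-line empty struct form is preferred.
type Handler struct{}

func NewHandler() *Handler {
    return &Handler{}
}

func main() {
    fmt.Printf("%T\n", NewHandler()) // *main.Handler
}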