1
0
Fork 0
mirror of https://github.com/gocsaf/csaf.git synced 2025-12-22 18:15:42 +01:00

Merge branch 'main' into itest-download-from-agg

This commit is contained in:
Bernhard Reiter 2022-07-22 09:10:07 +02:00
commit 82a1a1997a
No known key found for this signature in database
GPG key ID: 2B7BA3BF9BC3A554
11 changed files with 158 additions and 39 deletions

View file

@ -16,7 +16,7 @@ is a command line tool that uploads CSAF documents to the `csaf_provider`.
is an implementation of the role CSAF Aggregator. is an implementation of the role CSAF Aggregator.
## [csaf_checker](docs/csaf_checker.md) ## [csaf_checker](docs/csaf_checker.md)
is a tool for testing a CSAF Trusted Provider according to [Section 7 of the CSAF standard](https://docs.oasis-open.org/csaf/csaf/v2.0/csaf-v2.0.html#7-distributing-csaf-documents). is a tool for testing a CSAF Trusted Provider according to [Section 7 of the CSAF standard](https://docs.oasis-open.org/csaf/csaf/v2.0/csaf-v2.0.html#7-distributing-csaf-documents). Does check requirements without considering the indicated `role` yet.
## [csaf_downloader](docs/csaf_downloader.md) ## [csaf_downloader](docs/csaf_downloader.md)
is a tool for downloading advisories from a provider. is a tool for downloading advisories from a provider.

View file

@ -36,12 +36,13 @@ type provider struct {
Name string `toml:"name"` Name string `toml:"name"`
Domain string `toml:"domain"` Domain string `toml:"domain"`
// Rate gives the provider specific rate limiting (see overall Rate). // Rate gives the provider specific rate limiting (see overall Rate).
Rate *float64 `toml:"rate"` Rate *float64 `toml:"rate"`
Insecure *bool `toml:"insecure"` Insecure *bool `toml:"insecure"`
Categories *[]string `toml:"categories"` WriteIndices *bool `toml:"write_indices"`
Categories *[]string `toml:"categories"`
// ServiceDocument indicates if we should create a service.json document. // ServiceDocument indicates if we should create a service.json document.
ServiceDocument *bool `toml:"create_service_document"` ServiceDocument *bool `toml:"create_service_document"`
WriteIndices *bool `toml:"write_indices"` AggregatoryCategory *csaf.AggregatorCategory `toml:"category"`
} }
type config struct { type config struct {
@ -101,6 +102,26 @@ func (p *provider) writeIndices(c *config) bool {
return c.WriteIndices return c.WriteIndices
} }
// runAsMirror reports whether this provider should be mirrored.
// A provider-level category, when set, overrides the aggregator-wide
// setting; otherwise the global config decides.
func (p *provider) runAsMirror(c *config) bool {
	if cat := p.AggregatoryCategory; cat != nil {
		return *cat == csaf.AggregatorAggregator
	}
	return c.runAsMirror()
}
// atLeastNMirrors checks if there are at least n mirrors configured.
// It stops scanning as soon as the threshold is reached.
func (c *config) atLeastNMirrors(n int) bool {
	count := 0
	for _, prov := range c.Providers {
		if !prov.runAsMirror(c) {
			continue
		}
		count++
		if count >= n {
			return true
		}
	}
	return false
}
// runAsMirror determines if the aggregator should run in mirror mode. // runAsMirror determines if the aggregator should run in mirror mode.
func (c *config) runAsMirror() bool { func (c *config) runAsMirror() bool {
return c.Aggregator.Category != nil && return c.Aggregator.Category != nil &&
@ -184,6 +205,20 @@ func (c *config) checkProviders() error {
return nil return nil
} }
// checkMirror validates the mirror-related configuration: an
// aggregator-mode instance needs at least two mirrored providers
// (one if allow_single_provider is set), while a strict lister
// instance must not contain any mirrored providers at all.
func (c *config) checkMirror() error {
	switch {
	case c.runAsMirror():
		if c.AllowSingleProvider {
			if !c.atLeastNMirrors(1) {
				return errors.New("at least one provider must be mirrored")
			}
		} else if !c.atLeastNMirrors(2) {
			return errors.New("at least 2 providers need to be mirrored")
		}
	case !c.AllowSingleProvider && c.atLeastNMirrors(1):
		return errors.New("found mirrors in a lister aggregator")
	}
	return nil
}
func (c *config) setDefaults() { func (c *config) setDefaults() {
if c.Folder == "" { if c.Folder == "" {
c.Folder = defaultFolder c.Folder = defaultFolder
@ -219,7 +254,11 @@ func (c *config) check() error {
return err return err
} }
return c.checkProviders() if err := c.checkProviders(); err != nil {
return err
}
return c.checkMirror()
} }
func loadConfig(path string) (*config, error) { func loadConfig(path string) (*config, error) {

View file

@ -24,6 +24,7 @@ import (
type fullJob struct { type fullJob struct {
provider *provider provider *provider
aggregatorProvider *csaf.AggregatorCSAFProvider aggregatorProvider *csaf.AggregatorCSAFProvider
work fullWorkFunc
err error err error
} }
@ -61,11 +62,7 @@ func (w *worker) setupProviderFull(provider *provider) error {
type fullWorkFunc func(*worker) (*csaf.AggregatorCSAFProvider, error) type fullWorkFunc func(*worker) (*csaf.AggregatorCSAFProvider, error)
// fullWork handles the treatment of providers concurrently. // fullWork handles the treatment of providers concurrently.
func (w *worker) fullWork( func (w *worker) fullWork(wg *sync.WaitGroup, jobs <-chan *fullJob) {
wg *sync.WaitGroup,
doWork fullWorkFunc,
jobs <-chan *fullJob,
) {
defer wg.Done() defer wg.Done()
for j := range jobs { for j := range jobs {
@ -73,16 +70,15 @@ func (w *worker) fullWork(
j.err = err j.err = err
continue continue
} }
j.aggregatorProvider, j.err = doWork(w) j.aggregatorProvider, j.err = j.work(w)
} }
} }
// full performs the complete lister/download // full performs the complete lister/download
func (p *processor) full() error { func (p *processor) full() error {
var doWork fullWorkFunc
if p.cfg.runAsMirror() { if p.cfg.runAsMirror() {
log.Println("Running in aggregator mode")
// check if we need to setup a remote validator // check if we need to setup a remote validator
if p.cfg.RemoteValidatorOptions != nil { if p.cfg.RemoteValidatorOptions != nil {
@ -98,11 +94,7 @@ func (p *processor) full() error {
p.remoteValidator = nil p.remoteValidator = nil
}() }()
} }
doWork = (*worker).mirror
log.Println("Running in aggregator mode")
} else { } else {
doWork = (*worker).lister
log.Println("Running in lister mode") log.Println("Running in lister mode")
} }
@ -113,13 +105,22 @@ func (p *processor) full() error {
for i := 1; i <= p.cfg.Workers; i++ { for i := 1; i <= p.cfg.Workers; i++ {
wg.Add(1) wg.Add(1)
w := newWorker(i, p) w := newWorker(i, p)
go w.fullWork(&wg, doWork, queue) go w.fullWork(&wg, queue)
} }
jobs := make([]fullJob, len(p.cfg.Providers)) jobs := make([]fullJob, len(p.cfg.Providers))
for i, p := range p.cfg.Providers { for i, provider := range p.cfg.Providers {
jobs[i] = fullJob{provider: p} var work fullWorkFunc
if provider.runAsMirror(p.cfg) {
work = (*worker).mirror
} else {
work = (*worker).lister
}
jobs[i] = fullJob{
provider: provider,
work: work,
}
queue <- &jobs[i] queue <- &jobs[i]
} }
close(queue) close(queue)

View file

@ -76,7 +76,8 @@ func (w *worker) mirrorInternal() (*csaf.AggregatorCSAFProvider, error) {
w.client, w.client,
w.expr, w.expr,
w.metadataProvider, w.metadataProvider,
base) base,
nil)
if err := afp.Process(w.mirrorFiles); err != nil { if err := afp.Process(w.mirrorFiles); err != nil {
return nil, err return nil, err
@ -115,7 +116,7 @@ func (w *worker) mirrorInternal() (*csaf.AggregatorCSAFProvider, error) {
func (w *worker) labelsFromSummaries() []csaf.TLPLabel { func (w *worker) labelsFromSummaries() []csaf.TLPLabel {
labels := make([]csaf.TLPLabel, 0, len(w.summaries)) labels := make([]csaf.TLPLabel, 0, len(w.summaries))
for label := range w.summaries { for label := range w.summaries {
labels = append(labels, csaf.TLPLabel(label)) labels = append(labels, csaf.TLPLabel(strings.ToUpper(label)))
} }
sort.Slice(labels, func(i, j int) bool { return labels[i] < labels[j] }) sort.Slice(labels, func(i, j int) bool { return labels[i] < labels[j] })
return labels return labels

View file

@ -756,8 +756,13 @@ func (p *processor) checkIndex(base string, mask whereType) error {
defer res.Body.Close() defer res.Body.Close()
var files []csaf.AdvisoryFile var files []csaf.AdvisoryFile
scanner := bufio.NewScanner(res.Body) scanner := bufio.NewScanner(res.Body)
for scanner.Scan() { for line := 1; scanner.Scan(); line++ {
files = append(files, csaf.PlainAdvisoryFile(scanner.Text())) u := scanner.Text()
if _, err := url.Parse(u); err != nil {
p.badIntegrities.error("index.txt contains invalid URL %q in line %d", u, line)
continue
}
files = append(files, csaf.PlainAdvisoryFile(u))
} }
return files, scanner.Err() return files, scanner.Err()
}() }()

View file

@ -114,7 +114,8 @@ func (d *downloader) download(domain string) error {
d.httpClient(), d.httpClient(),
d.eval, d.eval,
lpmd.Document, lpmd.Document,
base) base,
nil)
return afp.Process(d.downloadFiles) return afp.Process(d.downloadFiles)
} }

View file

@ -75,6 +75,7 @@ type AdvisoryFileProcessor struct {
expr *util.PathEval expr *util.PathEval
doc interface{} doc interface{}
base *url.URL base *url.URL
log func(format string, args ...interface{})
} }
// NewAdvisoryFileProcessor constructs a filename extractor // NewAdvisoryFileProcessor constructs a filename extractor
@ -84,24 +85,34 @@ func NewAdvisoryFileProcessor(
expr *util.PathEval, expr *util.PathEval,
doc interface{}, doc interface{},
base *url.URL, base *url.URL,
log func(format string, args ...interface{}),
) *AdvisoryFileProcessor { ) *AdvisoryFileProcessor {
return &AdvisoryFileProcessor{ return &AdvisoryFileProcessor{
client: client, client: client,
expr: expr, expr: expr,
doc: doc, doc: doc,
base: base, base: base,
log: log,
} }
} }
// Process extracts the advisory filenames and passes them with // Process extracts the advisory filenames and passes them with
// the corresponding label to fn. // the corresponding label to fn.
func (afp *AdvisoryFileProcessor) Process(fn func(TLPLabel, []AdvisoryFile) error) error { func (afp *AdvisoryFileProcessor) Process(
fn func(TLPLabel, []AdvisoryFile) error,
) error {
lg := afp.log
if lg == nil {
lg = func(format string, args ...interface{}) {
log.Printf("AdvisoryFileProcessor.Process: "+format, args...)
}
}
// Check if we have ROLIE feeds. // Check if we have ROLIE feeds.
rolie, err := afp.expr.Eval( rolie, err := afp.expr.Eval(
"$.distributions[*].rolie.feeds", afp.doc) "$.distributions[*].rolie.feeds", afp.doc)
if err != nil { if err != nil {
log.Printf("rolie check failed: %v\n", err) lg("rolie check failed: %v\n", err)
return err return err
} }
@ -113,7 +124,7 @@ func (afp *AdvisoryFileProcessor) Process(fn func(TLPLabel, []AdvisoryFile) erro
if err := util.ReMarshalJSON(&feeds, rolie); err != nil { if err := util.ReMarshalJSON(&feeds, rolie); err != nil {
return err return err
} }
log.Printf("Found %d ROLIE feed(s).\n", len(feeds)) lg("Found %d ROLIE feed(s).\n", len(feeds))
for _, feed := range feeds { for _, feed := range feeds {
if err := afp.processROLIE(feed, fn); err != nil { if err := afp.processROLIE(feed, fn); err != nil {
@ -122,7 +133,7 @@ func (afp *AdvisoryFileProcessor) Process(fn func(TLPLabel, []AdvisoryFile) erro
} }
} else { } else {
// No rolie feeds -> try to load files from index.txt // No rolie feeds -> try to load files from index.txt
files, err := afp.loadIndex() files, err := afp.loadIndex(lg)
if err != nil { if err != nil {
return err return err
} }
@ -136,12 +147,19 @@ func (afp *AdvisoryFileProcessor) Process(fn func(TLPLabel, []AdvisoryFile) erro
// loadIndex loads baseURL/index.txt and returns a list of files // loadIndex loads baseURL/index.txt and returns a list of files
// prefixed by baseURL/. // prefixed by baseURL/.
func (afp *AdvisoryFileProcessor) loadIndex() ([]AdvisoryFile, error) { func (afp *AdvisoryFileProcessor) loadIndex(
lg func(string, ...interface{}),
) ([]AdvisoryFile, error) {
baseURL, err := util.BaseURL(afp.base) baseURL, err := util.BaseURL(afp.base)
if err != nil { if err != nil {
return nil, err return nil, err
} }
indexURL := baseURL + "/index.txt" base, err := url.Parse(baseURL)
if err != nil {
return nil, err
}
indexURL := util.JoinURLPath(base, "index.txt").String()
resp, err := afp.client.Get(indexURL) resp, err := afp.client.Get(indexURL)
if err != nil { if err != nil {
return nil, err return nil, err
@ -151,8 +169,14 @@ func (afp *AdvisoryFileProcessor) loadIndex() ([]AdvisoryFile, error) {
scanner := bufio.NewScanner(resp.Body) scanner := bufio.NewScanner(resp.Body)
for scanner.Scan() { for line := 1; scanner.Scan(); line++ {
files = append(files, PlainAdvisoryFile(baseURL+"/"+scanner.Text())) u := scanner.Text()
if _, err := url.Parse(u); err != nil {
lg("index.txt contains invalid URL %q in line %d", u, line)
continue
}
files = append(files,
PlainAdvisoryFile(util.JoinURLPath(base, u).String()))
} }
if err := scanner.Err(); err != nil { if err := scanner.Err(); err != nil {

View file

@ -101,8 +101,14 @@ domain
rate rate
insecure insecure
write_indices write_indices
category
``` ```
If you want an entry to be listed instead of mirrored
in an `aggregator.category == "aggregator"` instance,
set `category` to `lister` in the entry.
Otherwise it is recommended to not set `category` for entries.
#### Example config file #### Example config file
<!-- MARKDOWN-AUTO-DOCS:START (CODE:src=../docs/examples/aggregator.toml) --> <!-- MARKDOWN-AUTO-DOCS:START (CODE:src=../docs/examples/aggregator.toml) -->
<!-- The below code snippet is automatically added from ../docs/examples/aggregator.toml --> <!-- The below code snippet is automatically added from ../docs/examples/aggregator.toml -->
@ -123,6 +129,8 @@ insecure = true
# allow_single_provider = true # allow_single_provider = true
[aggregator] [aggregator]
# Set if this instance shall be a mirror (aka `aggregator`) or a `lister`.
# This determines the default value for the entries in [[provider]].
category = "aggregator" category = "aggregator"
name = "Example Development CSAF Aggregator" name = "Example Development CSAF Aggregator"
contact_details = "some @ somewhere" contact_details = "some @ somewhere"
@ -143,5 +151,15 @@ insecure = true
# rate = 1.2 # rate = 1.2
# insecure = true # insecure = true
write_indices = true write_indices = true
[[providers]]
name = "local-dev-provider3"
domain = "localhost"
# rate = 1.8
# insecure = true
write_indices = true
# If aggregator.category == "aggregator", set for an entry that should
# be listed in addition:
category = "lister"
``` ```
<!-- MARKDOWN-AUTO-DOCS:END --> <!-- MARKDOWN-AUTO-DOCS:END -->

View file

@ -31,3 +31,10 @@ type 2: error
``` ```
The checker result is a success if no checks resulted in type 2, and a failure otherwise. The checker result is a success if no checks resulted in type 2, and a failure otherwise.
### Remarks
The `role` given in the `provider-metadata.json` is not
yet considered to change the overall result,
see https://github.com/csaf-poc/csaf_distribution/issues/221 .

View file

@ -14,6 +14,8 @@ insecure = true
# allow_single_provider = true # allow_single_provider = true
[aggregator] [aggregator]
# Set if this instance shall be a mirror (aka `aggregator`) or a `lister`.
# This determines the default value for the entries in [[provider]].
category = "aggregator" category = "aggregator"
name = "Example Development CSAF Aggregator" name = "Example Development CSAF Aggregator"
contact_details = "some @ somewhere" contact_details = "some @ somewhere"
@ -34,3 +36,13 @@ insecure = true
# rate = 1.2 # rate = 1.2
# insecure = true # insecure = true
write_indices = true write_indices = true
[[providers]]
name = "local-dev-provider3"
domain = "localhost"
# rate = 1.8
# insecure = true
write_indices = true
# If aggregator.category == "aggregator", set for an entry that should
# be listed in addition:
category = "lister"

View file

@ -53,7 +53,7 @@ location /cgi-bin/ {
fastcgi_param SCRIPT_FILENAME /usr/lib$fastcgi_script_name; fastcgi_param SCRIPT_FILENAME /usr/lib$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info; fastcgi_param PATH_INFO $fastcgi_path_info;
fastcgi_param CSAF_CONFIG /usr/lib/csaf/config.toml; fastcgi_param CSAF_CONFIG /etc/csaf/config.toml;
fastcgi_param SSL_CLIENT_VERIFY $ssl_client_verify; fastcgi_param SSL_CLIENT_VERIFY $ssl_client_verify;
fastcgi_param SSL_CLIENT_S_DN $ssl_client_s_dn; fastcgi_param SSL_CLIENT_S_DN $ssl_client_s_dn;
@ -103,9 +103,9 @@ Many systems use `www-data` as user id, so you could do something like
<!-- MARKDOWN-AUTO-DOCS:START (CODE:src=../docs/scripts/setupProviderForITest.sh&lines=84-86) --> <!-- MARKDOWN-AUTO-DOCS:START (CODE:src=../docs/scripts/setupProviderForITest.sh&lines=84-86) -->
<!-- The below code snippet is automatically added from ../docs/scripts/setupProviderForITest.sh --> <!-- The below code snippet is automatically added from ../docs/scripts/setupProviderForITest.sh -->
```sh ```sh
sudo touch /usr/lib/csaf/config.toml sudo touch /etc/csaf/config.toml
sudo chgrp www-data /usr/lib/csaf/config.toml sudo chgrp www-data /etc/csaf/config.toml
sudo chmod g+r,o-rwx /usr/lib/csaf/config.toml sudo chmod g+r,o-rwx /etc/csaf/config.toml
``` ```
<!-- MARKDOWN-AUTO-DOCS:END --> <!-- MARKDOWN-AUTO-DOCS:END -->
@ -118,6 +118,17 @@ which you need to customize for a production setup,
see the [options of `csaf_provider`](https://github.com/csaf-poc/csaf_distribution/blob/main/docs/csaf_provider.md). see the [options of `csaf_provider`](https://github.com/csaf-poc/csaf_distribution/blob/main/docs/csaf_provider.md).
<!-- MARKDOWN-AUTO-DOCS:START (CODE:src=../docs/scripts/setupProviderForITest.sh&lines=94-101) --> <!-- MARKDOWN-AUTO-DOCS:START (CODE:src=../docs/scripts/setupProviderForITest.sh&lines=94-101) -->
<!-- The below code snippet is automatically added from ../docs/scripts/setupProviderForITest.sh -->
```sh
# upload_signature = true
openpgp_private_key = "/etc/csaf/private.asc"
openpgp_public_key = "/etc/csaf/public.asc"
#tlps = ["green", "red"]
canonical_url_prefix = "https://localhost:8443"
categories = ["Example Company Product A", "expr:document.lang"]
create_service_document = true
#no_passphrase = true
```
<!-- MARKDOWN-AUTO-DOCS:END --> <!-- MARKDOWN-AUTO-DOCS:END -->