Mirror of https://github.com/gocsaf/csaf.git, synced 2025-12-22 11:55:40 +01:00

Merge branch 'main' into itest-download-from-agg

Commit 82a1a1997a
11 changed files with 158 additions and 39 deletions
@@ -16,7 +16,7 @@ is a command line tool that uploads CSAF documents to the `csaf_provider`.
 is an implementation of the role CSAF Aggregator.
 
 ## [csaf_checker](docs/csaf_checker.md)
 
-is a tool for testing a CSAF Trusted Provider according to [Section 7 of the CSAF standard](https://docs.oasis-open.org/csaf/csaf/v2.0/csaf-v2.0.html#7-distributing-csaf-documents).
+is a tool for testing a CSAF Trusted Provider according to [Section 7 of the CSAF standard](https://docs.oasis-open.org/csaf/csaf/v2.0/csaf-v2.0.html#7-distributing-csaf-documents). It checks the requirements without considering the indicated `role` yet.
 
 ## [csaf_downloader](docs/csaf_downloader.md)
 
 is a tool for downloading advisories from a provider.

@@ -36,12 +36,13 @@ type provider struct {
 	Name   string `toml:"name"`
 	Domain string `toml:"domain"`
 	// Rate gives the provider specific rate limiting (see overall Rate).
-	Rate       *float64  `toml:"rate"`
-	Insecure   *bool     `toml:"insecure"`
-	Categories *[]string `toml:"categories"`
+	Rate         *float64  `toml:"rate"`
+	Insecure     *bool     `toml:"insecure"`
+	WriteIndices *bool     `toml:"write_indices"`
+	Categories   *[]string `toml:"categories"`
 	// ServiceDocument indicates if we should create a service.json document.
-	ServiceDocument *bool `toml:"create_service_document"`
-	WriteIndices    *bool `toml:"write_indices"`
+	ServiceDocument     *bool                    `toml:"create_service_document"`
 	AggregatoryCategory *csaf.AggregatorCategory `toml:"category"`
 }
 
 type config struct {

@@ -101,6 +102,26 @@ func (p *provider) writeIndices(c *config) bool {
 	return c.WriteIndices
 }
 
+func (p *provider) runAsMirror(c *config) bool {
+	if p.AggregatoryCategory != nil {
+		return *p.AggregatoryCategory == csaf.AggregatorAggregator
+	}
+	return c.runAsMirror()
+}
+
+// atLeastNMirrors checks if there are at least n mirrors configured.
+func (c *config) atLeastNMirrors(n int) bool {
+	var mirrors int
+	for _, p := range c.Providers {
+		if p.runAsMirror(c) {
+			if mirrors++; mirrors >= n {
+				return true
+			}
+		}
+	}
+	return false
+}
+
 // runAsMirror determines if the aggregator should run in mirror mode.
 func (c *config) runAsMirror() bool {
 	return c.Aggregator.Category != nil &&

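The new `runAsMirror` on `provider` overrides the global aggregator category only when the entry sets its own `category`, and falls back to `config.runAsMirror()` otherwise. A minimal, self-contained sketch of that override-with-fallback pattern (the types and names below are illustrative, not the project's):

```go
// Sketch of a per-entry setting that falls back to a global default.
// A nil pointer field means "not set in the config, inherit the global value".
package main

import "fmt"

type globalConfig struct {
	Category string // e.g. "aggregator" or "lister"
}

type providerEntry struct {
	Name     string
	Category *string // nil means: inherit the global category
}

// runAsMirror mimics the fallback: the entry's own category wins,
// otherwise the global category decides.
func (p *providerEntry) runAsMirror(c *globalConfig) bool {
	if p.Category != nil {
		return *p.Category == "aggregator"
	}
	return c.Category == "aggregator"
}

func main() {
	cfg := &globalConfig{Category: "aggregator"}
	lister := "lister"
	providers := []providerEntry{
		{Name: "provider1"},                    // inherits: mirrored
		{Name: "provider2", Category: &lister}, // overridden: listed only
	}
	for i := range providers {
		p := &providers[i]
		fmt.Printf("%s mirrored: %v\n", p.Name, p.runAsMirror(cfg))
	}
}
```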
@@ -184,6 +205,20 @@ func (c *config) checkProviders() error {
 	return nil
 }
 
+func (c *config) checkMirror() error {
+	if c.runAsMirror() {
+		if !c.AllowSingleProvider && !c.atLeastNMirrors(2) {
+			return errors.New("at least 2 providers need to be mirrored")
+		} else if c.AllowSingleProvider && !c.atLeastNMirrors(1) {
+			return errors.New("at least one provider must be mirrored")
+		}
+	} else if !c.AllowSingleProvider && c.atLeastNMirrors(1) {
+		return errors.New("found mirrors in a lister aggregator")
+	}
+
+	return nil
+}
+
 func (c *config) setDefaults() {
 	if c.Folder == "" {
 		c.Folder = defaultFolder

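`checkMirror` covers three cases: an aggregator needs at least two mirrored providers, or one if `allow_single_provider` is set, while a lister must not mirror anything unless `allow_single_provider` is set. A standalone sketch that restates the same rule on plain values (a hypothetical helper, not the project's API):

```go
// Illustrative restatement of the mirror-count rule enforced by checkMirror.
package main

import (
	"errors"
	"fmt"
)

func checkMirrorRule(runAsMirror, allowSingle bool, mirrored int) error {
	if runAsMirror {
		if !allowSingle && mirrored < 2 {
			return errors.New("at least 2 providers need to be mirrored")
		}
		if allowSingle && mirrored < 1 {
			return errors.New("at least one provider must be mirrored")
		}
		return nil
	}
	if !allowSingle && mirrored >= 1 {
		return errors.New("found mirrors in a lister aggregator")
	}
	return nil
}

func main() {
	fmt.Println(checkMirrorRule(true, false, 1))  // error: needs two mirrors
	fmt.Println(checkMirrorRule(true, true, 1))   // <nil>
	fmt.Println(checkMirrorRule(false, false, 1)) // error: mirrors in a lister
}
```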
@@ -219,7 +254,11 @@ func (c *config) check() error {
 		return err
 	}
 
-	return c.checkProviders()
+	if err := c.checkProviders(); err != nil {
+		return err
+	}
+
+	return c.checkMirror()
 }
 
 func loadConfig(path string) (*config, error) {

@@ -24,6 +24,7 @@ import (
 type fullJob struct {
 	provider           *provider
 	aggregatorProvider *csaf.AggregatorCSAFProvider
+	work               fullWorkFunc
 	err                error
 }
 

@@ -61,11 +62,7 @@ func (w *worker) setupProviderFull(provider *provider) error {
 type fullWorkFunc func(*worker) (*csaf.AggregatorCSAFProvider, error)
 
 // fullWork handles the treatment of providers concurrently.
-func (w *worker) fullWork(
-	wg *sync.WaitGroup,
-	doWork fullWorkFunc,
-	jobs <-chan *fullJob,
-) {
+func (w *worker) fullWork(wg *sync.WaitGroup, jobs <-chan *fullJob) {
 	defer wg.Done()
 
 	for j := range jobs {

@@ -73,16 +70,15 @@ func (w *worker) fullWork(
 			j.err = err
 			continue
 		}
-		j.aggregatorProvider, j.err = doWork(w)
+		j.aggregatorProvider, j.err = j.work(w)
 	}
 }
 
 // full performs the complete lister/download
 func (p *processor) full() error {
 
-	var doWork fullWorkFunc
-
 	if p.cfg.runAsMirror() {
+		log.Println("Running in aggregator mode")
 
 		// check if we need to setup a remote validator
 		if p.cfg.RemoteValidatorOptions != nil {

@@ -98,11 +94,7 @@ func (p *processor) full() error {
 				p.remoteValidator = nil
 			}()
 		}
-
-		doWork = (*worker).mirror
-		log.Println("Running in aggregator mode")
 	} else {
-		doWork = (*worker).lister
 		log.Println("Running in lister mode")
 	}
 

@@ -113,13 +105,22 @@ func (p *processor) full() error {
 	for i := 1; i <= p.cfg.Workers; i++ {
 		wg.Add(1)
 		w := newWorker(i, p)
-		go w.fullWork(&wg, doWork, queue)
+		go w.fullWork(&wg, queue)
 	}
 
 	jobs := make([]fullJob, len(p.cfg.Providers))
 
-	for i, p := range p.cfg.Providers {
-		jobs[i] = fullJob{provider: p}
+	for i, provider := range p.cfg.Providers {
+		var work fullWorkFunc
+		if provider.runAsMirror(p.cfg) {
+			work = (*worker).mirror
+		} else {
+			work = (*worker).lister
+		}
+		jobs[i] = fullJob{
+			provider: provider,
+			work:     work,
+		}
 		queue <- &jobs[i]
 	}
 	close(queue)

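With these changes each job carries its own work function (`mirror` or `lister`), chosen per provider, instead of a single `doWork` picked for the whole run. A minimal, self-contained sketch of a worker pool built around that idea (generic names, not the project's types):

```go
// Sketch of a worker pool where every job brings its own work function,
// so workers stay generic and per-job behaviour is decided when enqueuing.
package main

import (
	"fmt"
	"sync"
)

type job struct {
	name   string
	work   func(name string) string // chosen per job, e.g. "mirror" vs. "lister"
	result string
}

func workerLoop(wg *sync.WaitGroup, jobs <-chan *job) {
	defer wg.Done()
	for j := range jobs {
		j.result = j.work(j.name) // each job carries its own behaviour
	}
}

func main() {
	mirror := func(n string) string { return "mirrored " + n }
	lister := func(n string) string { return "listed " + n }

	jobs := []job{
		{name: "provider1", work: mirror},
		{name: "provider2", work: lister},
	}

	queue := make(chan *job)
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go workerLoop(&wg, queue)
	}
	for i := range jobs {
		queue <- &jobs[i]
	}
	close(queue)
	wg.Wait()

	for _, j := range jobs {
		fmt.Println(j.result)
	}
}
```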
@@ -76,7 +76,8 @@ func (w *worker) mirrorInternal() (*csaf.AggregatorCSAFProvider, error) {
 		w.client,
 		w.expr,
 		w.metadataProvider,
-		base)
+		base,
+		nil)
 
 	if err := afp.Process(w.mirrorFiles); err != nil {
 		return nil, err

@@ -115,7 +116,7 @@ func (w *worker) mirrorInternal() (*csaf.AggregatorCSAFProvider, error) {
 func (w *worker) labelsFromSummaries() []csaf.TLPLabel {
 	labels := make([]csaf.TLPLabel, 0, len(w.summaries))
 	for label := range w.summaries {
-		labels = append(labels, csaf.TLPLabel(label))
+		labels = append(labels, csaf.TLPLabel(strings.ToUpper(label)))
 	}
 	sort.Slice(labels, func(i, j int) bool { return labels[i] < labels[j] })
 	return labels

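The change above normalizes the summary keys to upper case before sorting them into TLP labels. A standalone sketch of the same idea using only the standard library (`TLPLabel` here is a stand-in, not the csaf package's type):

```go
// Sketch: collect map keys, upper-case them, and return them sorted.
package main

import (
	"fmt"
	"sort"
	"strings"
)

type TLPLabel string

func labelsFromKeys(summaries map[string]int) []TLPLabel {
	labels := make([]TLPLabel, 0, len(summaries))
	for label := range summaries {
		labels = append(labels, TLPLabel(strings.ToUpper(label)))
	}
	sort.Slice(labels, func(i, j int) bool { return labels[i] < labels[j] })
	return labels
}

func main() {
	fmt.Println(labelsFromKeys(map[string]int{"white": 3, "green": 1, "amber": 2}))
	// [AMBER GREEN WHITE]
}
```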
@@ -756,8 +756,13 @@ func (p *processor) checkIndex(base string, mask whereType) error {
 		defer res.Body.Close()
 		var files []csaf.AdvisoryFile
 		scanner := bufio.NewScanner(res.Body)
-		for scanner.Scan() {
-			files = append(files, csaf.PlainAdvisoryFile(scanner.Text()))
+		for line := 1; scanner.Scan(); line++ {
+			u := scanner.Text()
+			if _, err := url.Parse(u); err != nil {
+				p.badIntegrities.error("index.txt contains invalid URL %q in line %d", u, line)
+				continue
+			}
+			files = append(files, csaf.PlainAdvisoryFile(u))
 		}
 		return files, scanner.Err()
 	}()

@@ -114,7 +114,8 @@ func (d *downloader) download(domain string) error {
 		d.httpClient(),
 		d.eval,
 		lpmd.Document,
-		base)
+		base,
+		nil)
 
 	return afp.Process(d.downloadFiles)
 }

@@ -75,6 +75,7 @@ type AdvisoryFileProcessor struct {
 	expr *util.PathEval
 	doc  interface{}
 	base *url.URL
+	log  func(format string, args ...interface{})
 }
 
 // NewAdvisoryFileProcessor constructs a filename extractor

@@ -84,24 +85,34 @@ func NewAdvisoryFileProcessor(
 	expr *util.PathEval,
 	doc interface{},
 	base *url.URL,
+	log func(format string, args ...interface{}),
 ) *AdvisoryFileProcessor {
 	return &AdvisoryFileProcessor{
 		client: client,
 		expr:   expr,
 		doc:    doc,
 		base:   base,
+		log:    log,
 	}
 }
 
 // Process extracts the advisory filenames and passes them with
 // the corresponding label to fn.
-func (afp *AdvisoryFileProcessor) Process(fn func(TLPLabel, []AdvisoryFile) error) error {
+func (afp *AdvisoryFileProcessor) Process(
+	fn func(TLPLabel, []AdvisoryFile) error,
+) error {
+	lg := afp.log
+	if lg == nil {
+		lg = func(format string, args ...interface{}) {
+			log.Printf("AdvisoryFileProcessor.Process: "+format, args...)
+		}
+	}
 
 	// Check if we have ROLIE feeds.
 	rolie, err := afp.expr.Eval(
 		"$.distributions[*].rolie.feeds", afp.doc)
 	if err != nil {
-		log.Printf("rolie check failed: %v\n", err)
+		lg("rolie check failed: %v\n", err)
 		return err
 	}
 

@@ -113,7 +124,7 @@ func (afp *AdvisoryFileProcessor) Process(fn func(TLPLabel, []AdvisoryFile) erro
 		if err := util.ReMarshalJSON(&feeds, rolie); err != nil {
 			return err
 		}
-		log.Printf("Found %d ROLIE feed(s).\n", len(feeds))
+		lg("Found %d ROLIE feed(s).\n", len(feeds))
 
 		for _, feed := range feeds {
 			if err := afp.processROLIE(feed, fn); err != nil {

@@ -122,7 +133,7 @@ func (afp *AdvisoryFileProcessor) Process(fn func(TLPLabel, []AdvisoryFile) erro
 		}
 	} else {
 		// No rolie feeds -> try to load files from index.txt
-		files, err := afp.loadIndex()
+		files, err := afp.loadIndex(lg)
 		if err != nil {
 			return err
 		}

@@ -136,12 +147,19 @@ func (afp *AdvisoryFileProcessor) Process(fn func(TLPLabel, []AdvisoryFile) erro
 
 // loadIndex loads baseURL/index.txt and returns a list of files
 // prefixed by baseURL/.
-func (afp *AdvisoryFileProcessor) loadIndex() ([]AdvisoryFile, error) {
+func (afp *AdvisoryFileProcessor) loadIndex(
+	lg func(string, ...interface{}),
+) ([]AdvisoryFile, error) {
 	baseURL, err := util.BaseURL(afp.base)
 	if err != nil {
 		return nil, err
 	}
-	indexURL := baseURL + "/index.txt"
+	base, err := url.Parse(baseURL)
+	if err != nil {
+		return nil, err
+	}
+
+	indexURL := util.JoinURLPath(base, "index.txt").String()
 	resp, err := afp.client.Get(indexURL)
 	if err != nil {
 		return nil, err

@@ -151,8 +169,14 @@ func (afp *AdvisoryFileProcessor) loadIndex() ([]AdvisoryFile, error) {
 
 	scanner := bufio.NewScanner(resp.Body)
 
-	for scanner.Scan() {
-		files = append(files, PlainAdvisoryFile(baseURL+"/"+scanner.Text()))
+	for line := 1; scanner.Scan(); line++ {
+		u := scanner.Text()
+		if _, err := url.Parse(u); err != nil {
+			lg("index.txt contains invalid URL %q in line %d", u, line)
+			continue
+		}
+		files = append(files,
+			PlainAdvisoryFile(util.JoinURLPath(base, u).String()))
 	}
 
 	if err := scanner.Err(); err != nil {

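Both the checker and `loadIndex` now walk `index.txt` line by line, reject entries that do not parse as URLs (reporting the line number), and join the remaining entries onto the base URL via `util.JoinURLPath`. A self-contained sketch of that approach; `url.URL.JoinPath` from the standard library (Go 1.19+) stands in here for the repository's helper, and the sample index data is invented for illustration:

```go
// Sketch: validate each index.txt line as a URL, skip invalid lines with a
// logged line number, and join valid entries onto the base URL.
package main

import (
	"bufio"
	"fmt"
	"log"
	"net/url"
	"strings"
)

func collectFiles(base *url.URL, index string) []string {
	var files []string
	scanner := bufio.NewScanner(strings.NewReader(index))
	for line := 1; scanner.Scan(); line++ {
		u := scanner.Text()
		if _, err := url.Parse(u); err != nil {
			log.Printf("index.txt contains invalid URL %q in line %d", u, line)
			continue
		}
		files = append(files, base.JoinPath(u).String())
	}
	return files
}

func main() {
	base, _ := url.Parse("https://example.com/.well-known/csaf")
	index := "2023/example-advisory.json\n2023/%zz-broken\n"
	fmt.Println(collectFiles(base, index)) // the %zz line is skipped and logged
}
```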
@@ -101,8 +101,14 @@ domain
 rate
 insecure
+write_indices
+category
 ```
 
+If you want an entry to be listed instead of mirrored
+in an `aggregator.category == "aggregator"` instance,
+set `category` to `lister` in the entry.
+Otherwise it is recommended not to set `category` for entries.
 
 #### Example config file
 <!-- MARKDOWN-AUTO-DOCS:START (CODE:src=../docs/examples/aggregator.toml) -->
 <!-- The below code snippet is automatically added from ../docs/examples/aggregator.toml -->

@@ -123,6 +129,8 @@ insecure = true
 # allow_single_provider = true
 
 [aggregator]
+# Set if this instance shall be a mirror (aka `aggregator`) or a `lister`.
+# This determines the default value for the entries in [[provider]].
 category = "aggregator"
 name = "Example Development CSAF Aggregator"
 contact_details = "some @ somewhere"

@@ -143,5 +151,15 @@ insecure = true
 # rate = 1.2
 # insecure = true
 write_indices = true
+
+[[providers]]
+name = "local-dev-provider3"
+domain = "localhost"
+# rate = 1.8
+# insecure = true
+write_indices = true
+# If aggregator.category == "aggregator", set for an entry that should
+# be listed in addition:
+category = "lister"
 ```
 <!-- MARKDOWN-AUTO-DOCS:END -->

@@ -31,3 +31,10 @@ type 2: error
 ```
 
 The checker result is a success if no checks resulted in type 2, and a failure otherwise.
+
+
+### Remarks
+
+The `role` given in the `provider-metadata.json` is not
+yet considered to change the overall result,
+see https://github.com/csaf-poc/csaf_distribution/issues/221 .

@@ -14,6 +14,8 @@ insecure = true
 # allow_single_provider = true
 
 [aggregator]
+# Set if this instance shall be a mirror (aka `aggregator`) or a `lister`.
+# This determines the default value for the entries in [[provider]].
 category = "aggregator"
 name = "Example Development CSAF Aggregator"
 contact_details = "some @ somewhere"

@@ -34,3 +36,13 @@ insecure = true
 # rate = 1.2
 # insecure = true
 write_indices = true
+
+[[providers]]
+name = "local-dev-provider3"
+domain = "localhost"
+# rate = 1.8
+# insecure = true
+write_indices = true
+# If aggregator.category == "aggregator", set for an entry that should
+# be listed in addition:
+category = "lister"

@@ -53,7 +53,7 @@ location /cgi-bin/ {
   fastcgi_param SCRIPT_FILENAME /usr/lib$fastcgi_script_name;
 
   fastcgi_param PATH_INFO $fastcgi_path_info;
-  fastcgi_param CSAF_CONFIG /usr/lib/csaf/config.toml;
+  fastcgi_param CSAF_CONFIG /etc/csaf/config.toml;
 
   fastcgi_param SSL_CLIENT_VERIFY $ssl_client_verify;
   fastcgi_param SSL_CLIENT_S_DN $ssl_client_s_dn;

@@ -103,9 +103,9 @@ Many systems use `www-data` as user id, so you could do something like
 <!-- MARKDOWN-AUTO-DOCS:START (CODE:src=../docs/scripts/setupProviderForITest.sh&lines=84-86) -->
 <!-- The below code snippet is automatically added from ../docs/scripts/setupProviderForITest.sh -->
 ```sh
-sudo touch /usr/lib/csaf/config.toml
-sudo chgrp www-data /usr/lib/csaf/config.toml
-sudo chmod g+r,o-rwx /usr/lib/csaf/config.toml
+sudo touch /etc/csaf/config.toml
+sudo chgrp www-data /etc/csaf/config.toml
+sudo chmod g+r,o-rwx /etc/csaf/config.toml
 ```
 <!-- MARKDOWN-AUTO-DOCS:END -->
 

@@ -118,6 +118,17 @@ which you need to customize for a production setup,
 see the [options of `csaf_provider`](https://github.com/csaf-poc/csaf_distribution/blob/main/docs/csaf_provider.md).
 
+<!-- MARKDOWN-AUTO-DOCS:START (CODE:src=../docs/scripts/setupProviderForITest.sh&lines=94-101) -->
+<!-- The below code snippet is automatically added from ../docs/scripts/setupProviderForITest.sh -->
+```sh
+# upload_signature = true
+openpgp_private_key = "/etc/csaf/private.asc"
+openpgp_public_key = "/etc/csaf/public.asc"
+#tlps = ["green", "red"]
+canonical_url_prefix = "https://localhost:8443"
+categories = ["Example Company Product A", "expr:document.lang"]
+create_service_document = true
+#no_passphrase = true
+```
+<!-- MARKDOWN-AUTO-DOCS:END -->
 