1
0
Fork 0
mirror of https://github.com/gocsaf/csaf.git synced 2025-12-22 11:55:40 +01:00

Add aggregator; improve itest workflow

* Factor JSON evaluation and construction of base URLs out of checker.
* Move json path matching to util.
* Add csaf_aggregator (as additional command)
* Improve itest workflow to check out the branch it is running on.

resolve #105
resolve #72

Co-authored-by: tschmidtb51 <65305130+tschmidtb51@users.noreply.github.com>
Co-authored-by: Bernhard Reiter <bernhard@intevation.de>
Co-authored-by: Fadi Abbud <fadi.abbud@intevation.de>
This commit is contained in:
Sascha L. Teichmann 2022-05-10 18:12:38 +02:00 committed by GitHub
parent 9da0589236
commit 8a1ebe0b7a
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
30 changed files with 2789 additions and 88 deletions

View file

@ -0,0 +1,72 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"context"
"errors"
"io"
"net/http"
"net/url"
"golang.org/x/time/rate"
)
// client abstracts the subset of *http.Client used by the aggregator
// so implementations can be wrapped (e.g. with rate limiting).
// *http.Client satisfies this interface.
type client interface {
	Do(req *http.Request) (*http.Response, error)
	Get(url string) (*http.Response, error)
	Head(url string) (*http.Response, error)
	Post(url, contentType string, body io.Reader) (*http.Response, error)
	PostForm(url string, data url.Values) (*http.Response, error)
}
// limitingClient wraps a client and throttles every request
// through a shared rate limiter.
type limitingClient struct {
	client
	limiter *rate.Limiter
}

// waitTurn blocks until the limiter grants the next request.
func (l *limitingClient) waitTurn() {
	l.limiter.Wait(context.Background())
}

// Do implements the client interface with rate limiting.
func (l *limitingClient) Do(req *http.Request) (*http.Response, error) {
	l.waitTurn()
	return l.client.Do(req)
}

// Get implements the client interface with rate limiting.
func (l *limitingClient) Get(url string) (*http.Response, error) {
	l.waitTurn()
	return l.client.Get(url)
}

// Head implements the client interface with rate limiting.
func (l *limitingClient) Head(url string) (*http.Response, error) {
	l.waitTurn()
	return l.client.Head(url)
}

// Post implements the client interface with rate limiting.
func (l *limitingClient) Post(url, contentType string, body io.Reader) (*http.Response, error) {
	l.waitTurn()
	return l.client.Post(url, contentType, body)
}

// PostForm implements the client interface with rate limiting.
func (l *limitingClient) PostForm(url string, data url.Values) (*http.Response, error) {
	l.waitTurn()
	return l.client.PostForm(url, data)
}
// errNotFound is the sentinel returned by downloadJSON when the
// document is missing or not served as JSON.
var errNotFound = errors.New("not found")

// downloadJSON fetches url with c and, if the response is a 200 with
// Content-Type "application/json", passes the body to found.
// Every other outcome is reported as errNotFound, which the callers
// treat as an expected condition.
func downloadJSON(c client, url string, found func(io.Reader) error) error {
	res, err := c.Get(url)
	if err != nil {
		// Transport level failures count as "not found", too.
		return errNotFound
	}
	// Close unconditionally; previously the body leaked when the
	// status or content type did not match.
	defer res.Body.Close()
	// NOTE(review): this is an exact match; a server sending
	// "application/json; charset=utf-8" would be rejected -- confirm.
	if res.StatusCode != http.StatusOK ||
		res.Header.Get("Content-Type") != "application/json" {
		// ignore this as it is expected.
		return errNotFound
	}
	return found(res.Body)
}

View file

@ -0,0 +1,217 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"crypto/tls"
"errors"
"fmt"
"net/http"
"os"
"runtime"
"strings"
"sync"
"github.com/BurntSushi/toml"
"github.com/ProtonMail/gopenpgp/v2/crypto"
"github.com/csaf-poc/csaf_distribution/csaf"
"golang.org/x/time/rate"
)
// Defaults applied by setDefaults when the corresponding
// configuration option is left empty.
const (
	defaultConfigPath = "aggregator.toml"                                                  // Default path of the config file.
	defaultWorkers    = 10                                                                 // Default number of workers.
	defaultFolder     = "/var/www"                                                         // Default transaction folder.
	defaultWeb        = "/var/www/html"                                                    // Default web root.
	defaultDomain     = "https://example.com"                                              // Default public domain.
	defaultOpenPGPURL = "https://openpgp.circl.lu/pks/lookup?op=get&search=${FINGERPRINT}" // Default OpenPGP URL.
)
// provider is the configuration of a single CSAF provider.
type provider struct {
	// Name is the name of the provider; it must be unique.
	Name string `toml:"name"`
	// Domain is used to locate the provider-metadata.json.
	Domain string `toml:"domain"`
	// Rate gives the provider specific rate limiting (see overall Rate).
	Rate *float64 `toml:"rate"`
	// Insecure skips TLS certificate verification for this provider.
	Insecure *bool `toml:"insecure"`
}
// config is the top level configuration of the aggregator.
type config struct {
	// Workers is the number of concurrently executed workers for downloading.
	Workers int `toml:"workers"`
	// Folder is used as the destination for transaction working copies.
	Folder string `toml:"folder"`
	// Web is the web root the results are published into.
	Web string `toml:"web"`
	// Domain is the public domain of this aggregator.
	Domain string `toml:"domain"`
	// Rate gives the average upper limit of https operations per second.
	Rate *float64 `toml:"rate"`
	// Insecure skips TLS certificate verification globally.
	Insecure *bool `toml:"insecure"`
	// Aggregator is the self description written into aggregator.json.
	Aggregator csaf.AggregatorInfo `toml:"aggregator"`
	// Providers lists the providers to aggregate.
	Providers []*provider `toml:"providers"`
	// Key is the file name of the armored OpenPGP key (see cryptoKey).
	Key string `toml:"key"`
	// OpenPGPURL is a URL template; ${FINGERPRINT} and ${KEY_ID}
	// are expanded by GetOpenPGPURL.
	OpenPGPURL string `toml:"openpgp_url"`
	// Passphrase optionally unlocks the OpenPGP key.
	Passphrase *string `toml:"passphrase"`
	// AllowSingleProvider permits configs with fewer than two providers.
	AllowSingleProvider bool `toml:"allow_single_provider"`
	// LockFile tries to lock to a given file.
	LockFile *string `toml:"lock_file"`
	// Interim performs an interim scan.
	Interim bool `toml:"interim"`
	// InterimYears is the number of years to look back
	// for interim advisories. Less/equal zero means forever.
	InterimYears int `toml:"interim_years"`
	// keyMu guards the lazy loading of key/keyErr in cryptoKey.
	keyMu  sync.Mutex
	key    *crypto.Key
	keyErr error
}
// runAsMirror determines if the aggregator should run in mirror mode,
// i.e. if the configured category is "aggregator".
func (c *config) runAsMirror() bool {
	cat := c.Aggregator.Category
	return cat != nil && *cat == csaf.AggregatorAggregator
}
// GetOpenPGPURL expands the ${FINGERPRINT} and ${KEY_ID} placeholders
// of the configured OpenPGP URL with the data of the given key.
// A nil key returns the URL unexpanded.
func (c *config) GetOpenPGPURL(key *crypto.Key) string {
	if key == nil {
		return c.OpenPGPURL
	}
	pairs := []string{
		"${FINGERPRINT}", "0x" + key.GetFingerprint(),
		"${KEY_ID}", "0x" + key.GetHexKeyID(),
	}
	return strings.NewReplacer(pairs...).Replace(c.OpenPGPURL)
}
// cryptoKey lazily loads the configured OpenPGP key. The result
// (key or error) is cached under keyMu, so the file is read at
// most once. An empty Key configuration yields (nil, nil).
func (c *config) cryptoKey() (*crypto.Key, error) {
	if c.Key == "" {
		return nil, nil
	}
	c.keyMu.Lock()
	defer c.keyMu.Unlock()
	// Serve the cached result of an earlier load attempt.
	if c.key != nil || c.keyErr != nil {
		return c.key, c.keyErr
	}
	var f *os.File
	if f, c.keyErr = os.Open(c.Key); c.keyErr != nil {
		return nil, c.keyErr
	}
	defer f.Close()
	c.key, c.keyErr = crypto.NewKeyFromArmoredReader(f)
	return c.key, c.keyErr
}
// httpClient builds the HTTP client used to talk to provider p,
// honoring the insecure TLS and rate limiting options. Provider
// specific settings take precedence over global ones.
// (The local was renamed so it no longer shadows the client type.)
func (c *config) httpClient(p *provider) client {
	hc := http.Client{}

	// Insecure is enabled if set per provider or globally.
	if p.Insecure != nil && *p.Insecure || c.Insecure != nil && *c.Insecure {
		hc.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		}
	}

	if p.Rate == nil && c.Rate == nil {
		// No throttling configured at all.
		return &hc
	}

	// The provider rate overrides the global rate.
	var r float64
	if c.Rate != nil {
		r = *c.Rate
	}
	if p.Rate != nil {
		r = *p.Rate
	}
	return &limitingClient{
		client:  &hc,
		limiter: rate.NewLimiter(rate.Limit(r), 1),
	}
}
// checkProviders validates the provider list: unless a single provider
// is explicitly allowed at least two are required, and every entry
// needs a domain and a unique name.
func (c *config) checkProviders() error {
	if !c.AllowSingleProvider && len(c.Providers) < 2 {
		return errors.New("need at least two providers")
	}
	seen := make(map[string]bool, len(c.Providers))
	for _, p := range c.Providers {
		switch {
		case p.Name == "":
			return errors.New("no name given for provider")
		case p.Domain == "":
			return errors.New("no domain given for provider")
		case seen[p.Name]:
			return fmt.Errorf("provider '%s' is configured more than once", p.Name)
		}
		seen[p.Name] = true
	}
	return nil
}
// setDefaults fills in the default values for all options which
// were left empty in the configuration.
func (c *config) setDefaults() {
	fallback := func(dst *string, def string) {
		if *dst == "" {
			*dst = def
		}
	}
	fallback(&c.Folder, defaultFolder)
	fallback(&c.Web, defaultWeb)
	fallback(&c.Domain, defaultDomain)
	fallback(&c.OpenPGPURL, defaultOpenPGPURL)

	if c.Workers <= 0 {
		// Use one worker per CPU, capped at the default.
		n := runtime.NumCPU()
		if n > defaultWorkers {
			n = defaultWorkers
		}
		c.Workers = n
	}
	// More workers than providers is pointless.
	if c.Workers > len(c.Providers) {
		c.Workers = len(c.Providers)
	}
}
// check validates the loaded configuration as a whole.
func (c *config) check() error {
	if len(c.Providers) == 0 {
		return errors.New("no providers given in configuration")
	}
	err := c.Aggregator.Validate()
	if err == nil {
		err = c.checkProviders()
	}
	return err
}
// loadConfig reads the TOML configuration from path (falling back to
// defaultConfigPath if empty), applies the defaults and validates
// the result.
func loadConfig(path string) (*config, error) {
	if path == "" {
		path = defaultConfigPath
	}
	cfg := new(config)
	if _, err := toml.DecodeFile(path, cfg); err != nil {
		return nil, err
	}
	cfg.setDefaults()
	if err := cfg.check(); err != nil {
		return nil, err
	}
	return cfg, nil
}

View file

@ -0,0 +1,7 @@
// csaf_aggregator is an implementation of the role CSAF Aggregator of the
// CSAF 2.0 specification
// (https://docs.oasis-open.org/csaf/csaf/v2.0/csd02/csaf-v2.0-csd02.html)
//
// TODO: To be called periodically, e.g. with cron
package main

View file

@ -0,0 +1,38 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"fmt"
"os"
)
// writeHash writes a hash to file.
func writeHash(fname, name string, hash []byte) error {
f, err := os.Create(fname)
if err != nil {
return err
}
fmt.Fprintf(f, "%x %s\n", hash, name)
return f.Close()
}
// writeFileHashes stores data under fname and places the SHA256 and
// SHA512 checksum files next to it.
func writeFileHashes(fname, name string, data, s256, s512 []byte) error {
	// The document itself.
	if err := os.WriteFile(fname, data, 0644); err != nil {
		return err
	}
	// Both checksum sidecar files.
	for _, sidecar := range []struct {
		ext string
		sum []byte
	}{
		{".sha256", s256},
		{".sha512", s512},
	} {
		if err := writeHash(fname+sidecar.ext, name, sidecar.sum); err != nil {
			return err
		}
	}
	return nil
}

169
cmd/csaf_aggregator/full.go Normal file
View file

@ -0,0 +1,169 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"errors"
"fmt"
"log"
"os"
"path/filepath"
"sync"
"time"
"github.com/csaf-poc/csaf_distribution/csaf"
"github.com/csaf-poc/csaf_distribution/util"
)
// fullJob is the unit of work of a full run: the provider to process,
// the aggregator entry produced by the worker, and the error of the
// run (if any).
type fullJob struct {
	provider           *provider
	aggregatorProvider *csaf.AggregatorCSAFProvider
	err                error
}
// setupProviderFull fetches the provider-metadata.json for a specific
// provider, validates it and prepares the worker state for the run.
func (w *worker) setupProviderFull(provider *provider) error {
	log.Printf("worker #%d: %s (%s)\n",
		w.num, provider.Name, provider.Domain)

	w.dir = ""
	w.provider = provider

	// Each job needs a separate client.
	w.client = w.cfg.httpClient(provider)

	// We need the provider metadata in all cases.
	if err := w.locateProviderMetadata(provider.Domain); err != nil {
		return err
	}
	// Validate the provider metadata.
	// (Renamed from 'errors' to avoid shadowing the errors package.)
	issues, err := csaf.ValidateProviderMetadata(w.metadataProvider)
	if err != nil {
		return err
	}
	if len(issues) > 0 {
		return fmt.Errorf(
			"provider-metadata.json has %d validation issues", len(issues))
	}
	log.Printf("provider-metadata: %s\n", w.loc)
	return nil
}
// fullWorkFunc implements the actual work (mirror/list)
// done for a single provider.
type fullWorkFunc func(*worker) (*csaf.AggregatorCSAFProvider, error)

// fullWork handles the treatment of providers concurrently.
// Failures are recorded in the job; the loop continues with
// the next queued job.
func (w *worker) fullWork(
	wg *sync.WaitGroup,
	doWork fullWorkFunc,
	jobs <-chan *fullJob,
) {
	defer wg.Done()
	for j := range jobs {
		if err := w.setupProviderFull(j.provider); err != nil {
			j.err = err
			continue
		}
		j.aggregatorProvider, j.err = doWork(w)
	}
}
// full performs the complete lister/download run: all configured
// providers are processed by a pool of workers and the collected
// results are written as aggregator.json below the web root.
func (p *processor) full() error {
	// The work function depends on the configured category.
	var doWork fullWorkFunc
	if p.cfg.runAsMirror() {
		doWork = (*worker).mirror
		log.Println("Running in aggregator mode")
	} else {
		doWork = (*worker).lister
		log.Println("Running in lister mode")
	}

	queue := make(chan *fullJob)
	var wg sync.WaitGroup

	log.Printf("Starting %d workers.\n", p.cfg.Workers)
	for i := 1; i <= p.cfg.Workers; i++ {
		wg.Add(1)
		w := newWorker(i, p.cfg)
		go w.fullWork(&wg, doWork, queue)
	}

	// The jobs slice outlives the workers; results are written
	// through the pointers sent over the queue.
	jobs := make([]fullJob, len(p.cfg.Providers))

	for i, p := range p.cfg.Providers {
		jobs[i] = fullJob{provider: p}
		queue <- &jobs[i]
	}
	close(queue)
	wg.Wait()

	// Assemble aggregator data structure.
	csafProviders := make([]*csaf.AggregatorCSAFProvider, 0, len(jobs))

	// Failed providers are logged and skipped, not fatal.
	for i := range jobs {
		j := &jobs[i]
		if j.err != nil {
			log.Printf("error: '%s' failed: %v\n", j.provider.Name, j.err)
			continue
		}
		if j.aggregatorProvider == nil {
			log.Printf(
				"error: '%s' does not produce any result.\n", j.provider.Name)
			continue
		}
		csafProviders = append(csafProviders, j.aggregatorProvider)
	}

	if len(csafProviders) == 0 {
		return errors.New("all jobs failed, stopping")
	}

	version := csaf.AggregatorVersion20
	canonicalURL := csaf.AggregatorURL(
		p.cfg.Domain + "/.well-known/csaf-aggregator/aggregator.json")
	lastUpdated := csaf.TimeStamp(time.Now())

	agg := csaf.Aggregator{
		Aggregator:    &p.cfg.Aggregator,
		Version:       &version,
		CanonicalURL:  &canonicalURL,
		CSAFProviders: csafProviders,
		LastUpdated:   &lastUpdated,
	}

	web := filepath.Join(p.cfg.Web, ".well-known", "csaf-aggregator")
	dstName := filepath.Join(web, "aggregator.json")

	// Write to a unique temp file and rename into place so readers
	// never observe a partially written aggregator.json.
	fname, file, err := util.MakeUniqFile(dstName + ".tmp")
	if err != nil {
		return err
	}

	if _, err := agg.WriteTo(file); err != nil {
		file.Close()
		os.RemoveAll(fname)
		return err
	}

	if err := file.Close(); err != nil {
		return err
	}
	return os.Rename(fname, dstName)
}

View file

@ -0,0 +1,250 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
	"bufio"
	"encoding/csv"
	"fmt"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/csaf-poc/csaf_distribution/csaf"
	"github.com/csaf-poc/csaf_distribution/util"
)
// writeInterims writes interim.csv for the given TLP label with one
// (current release date, year/filename, source URL) record per
// advisory that is still in "interim" status, youngest first.
// Nothing is written when there are no interim advisories.
func (w *worker) writeInterims(label string, summaries []summary) error {

	// Filter out the interims.
	var ss []summary
	for _, s := range summaries {
		if s.summary.Status == "interim" {
			ss = append(ss, s)
		}
	}

	// No interims -> nothing to write.
	if len(ss) == 0 {
		return nil
	}

	// Youngest first.
	sort.SliceStable(ss, func(i, j int) bool {
		return ss[i].summary.CurrentReleaseDate.After(
			ss[j].summary.CurrentReleaseDate)
	})

	fname := filepath.Join(w.dir, label, "interim.csv")
	f, err := os.Create(fname)
	if err != nil {
		return err
	}
	out := csv.NewWriter(f)

	// The record slice is reused for every row.
	record := make([]string, 3)

	for i := range ss {
		s := &ss[i]
		record[0] =
			s.summary.CurrentReleaseDate.Format(time.RFC3339)
		record[1] =
			strconv.Itoa(s.summary.InitialReleaseDate.Year()) + "/" + s.filename
		record[2] = s.url
		if err := out.Write(record); err != nil {
			f.Close()
			return err
		}
	}
	out.Flush()
	// Report a pending writer error before the close error.
	err1 := out.Error()
	err2 := f.Close()
	if err1 != nil {
		return err1
	}
	return err2
}
// writeCSV writes changes.csv for the given TLP label, listing every
// advisory youngest first as (current release date, year/filename).
func (w *worker) writeCSV(label string, summaries []summary) error {
	// Work on a copy so the caller's order stays untouched.
	sorted := append([]summary(nil), summaries...)
	sort.SliceStable(sorted, func(i, j int) bool {
		return sorted[i].summary.CurrentReleaseDate.After(
			sorted[j].summary.CurrentReleaseDate)
	})

	f, err := os.Create(filepath.Join(w.dir, label, "changes.csv"))
	if err != nil {
		return err
	}
	out := csv.NewWriter(f)
	for i := range sorted {
		s := &sorted[i]
		rec := []string{
			s.summary.CurrentReleaseDate.Format(time.RFC3339),
			strconv.Itoa(s.summary.InitialReleaseDate.Year()) + "/" + s.filename,
		}
		if err := out.Write(rec); err != nil {
			f.Close()
			return err
		}
	}
	out.Flush()
	if err := out.Error(); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}
// writeIndex writes index.txt for the given TLP label with one
// "<year>/<filename>" line per advisory.
func (w *worker) writeIndex(label string, summaries []summary) error {
	f, err := os.Create(filepath.Join(w.dir, label, "index.txt"))
	if err != nil {
		return err
	}
	out := bufio.NewWriter(f)
	for i := range summaries {
		s := &summaries[i]
		fmt.Fprintf(out, "%d/%s\n",
			s.summary.InitialReleaseDate.Year(), s.filename)
	}
	// Flush errors take precedence over close errors.
	flushErr := out.Flush()
	closeErr := f.Close()
	if flushErr != nil {
		return flushErr
	}
	return closeErr
}
// writeROLIE writes the ROLIE feed csaf-feed-tlp-<label>.json for the
// given TLP label into the working directory, with one entry per
// advisory sorted by descending update time.
func (w *worker) writeROLIE(label string, summaries []summary) error {
	fname := "csaf-feed-tlp-" + strings.ToLower(label) + ".json"
	feedURL := w.cfg.Domain + "/.well-known/csaf-aggregator/" +
		w.provider.Name + "/" + fname
	entries := make([]*csaf.Entry, len(summaries))
	format := csaf.Format{
		Schema:  "https://docs.oasis-open.org/csaf/csaf/v2.0/csaf_json_schema.json",
		Version: "2.0",
	}
	for i := range summaries {
		s := &summaries[i]
		// Advisories live under <label>/<year of initial release>/.
		csafURL := w.cfg.Domain + "/.well-known/csaf-aggregator/" +
			w.provider.Name + "/" + label + "/" +
			strconv.Itoa(s.summary.InitialReleaseDate.Year()) + "/" +
			s.filename
		entries[i] = &csaf.Entry{
			ID: s.summary.ID,
			// 'Titel' is the field's spelling in the csaf package.
			Titel:     s.summary.Title,
			Published: csaf.TimeStamp(s.summary.InitialReleaseDate),
			Updated:   csaf.TimeStamp(s.summary.CurrentReleaseDate),
			Link: []csaf.Link{{
				Rel:  "self",
				HRef: csafURL,
			}},
			Format: format,
			Content: csaf.Content{
				Type: "application/json",
				Src:  csafURL,
			},
		}
		// Only attach a summary if the advisory has one.
		if s.summary.Summary != "" {
			entries[i].Summary = &csaf.Summary{
				Content: s.summary.Summary,
			}
		}
	}
	rolie := &csaf.ROLIEFeed{
		Feed: csaf.FeedData{
			ID:    "csaf-feed-tlp-" + strings.ToLower(label),
			Title: "CSAF feed (TLP:" + strings.ToUpper(label) + ")",
			Link: []csaf.Link{{
				// NOTE(review): "rel" looks like it should be "self"
				// (like the entry links above) -- confirm.
				Rel:  "rel",
				HRef: feedURL,
			}},
			Updated: csaf.TimeStamp(time.Now()),
			Entry:   entries,
		},
	}
	// Sort by descending updated order.
	rolie.SortEntriesByUpdated()
	path := filepath.Join(w.dir, fname)
	return util.WriteToFile(path, rolie)
}
// writeIndices writes interim.csv, changes.csv, index.txt and the
// ROLIE feed for every TLP label collected during the run. It is a
// no-op when nothing was collected or no directory was created.
func (w *worker) writeIndices() error {
	if len(w.summaries) == 0 || w.dir == "" {
		return nil
	}
	writers := []func(string, []summary) error{
		w.writeInterims,
		w.writeCSV,
		w.writeIndex,
		w.writeROLIE,
	}
	for label, summaries := range w.summaries {
		log.Printf("%s: %d\n", label, len(summaries))
		for _, write := range writers {
			if err := write(label, summaries); err != nil {
				return err
			}
		}
	}
	return nil
}
// loadIndex loads baseURL/index.txt and returns a list of files
// prefixed by baseURL/.
func (w *worker) loadIndex(baseURL string) ([]string, error) {
	indexURL := baseURL + "/index.txt"
	resp, err := w.client.Get(indexURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Without this check the body of an error page (e.g. a 404)
	// would have been parsed as a list of advisory files.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("fetching %s failed: status code %d (%s)",
			indexURL, resp.StatusCode, resp.Status)
	}
	var lines []string
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		lines = append(lines, baseURL+"/"+scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return lines, nil
}

View file

@ -0,0 +1,380 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"bytes"
"crypto/sha256"
"crypto/sha512"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/csaf-poc/csaf_distribution/csaf"
"github.com/csaf-poc/csaf_distribution/util"
)
// interimJob is the unit of work of an interim run: the provider
// to check and the error of that check (if any).
type interimJob struct {
	provider *provider
	err      error
}
// checkInterims compares the given interim advisories of one TLP label
// against their remote versions. Changed documents are re-downloaded,
// re-validated and written (with fresh hashes and signature) into the
// transaction folder. It returns the advisories which are not interim
// any longer.
func (w *worker) checkInterims(
	tx *lazyTransaction,
	label string,
	interims [][2]string,
) ([]string, error) {

	var data bytes.Buffer

	labelPath := filepath.Join(tx.Src(), label)

	// Advisories which are not interim any longer.
	// TODO(review): nothing appends to finalized yet, so callers
	// always receive an empty list -- confirm this is intended.
	var finalized []string

	for _, interim := range interims {

		local := filepath.Join(labelPath, interim[0])
		url := interim[1]

		// Load local SHA256 of the advisory.
		localHash, err := util.HashFromFile(local + ".sha256")
		if err != nil {
			// TODO(review): the error is swallowed here (nil, nil aborts
			// the label silently) -- confirm this should not be an error.
			return nil, nil
		}

		res, err := w.client.Get(url)
		if err != nil {
			return nil, err
		}
		if res.StatusCode != http.StatusOK {
			// The body leaked here before; close it explicitly.
			res.Body.Close()
			return nil, fmt.Errorf("fetching %s failed: Status code %d (%s)",
				url, res.StatusCode, res.Status)
		}

		// Hash while buffering the downloaded document.
		s256 := sha256.New()
		data.Reset()
		hasher := io.MultiWriter(s256, &data)

		var doc interface{}

		if err := func() error {
			defer res.Body.Close()
			tee := io.TeeReader(res.Body, hasher)
			return json.NewDecoder(tee).Decode(&doc)
		}(); err != nil {
			return nil, err
		}

		remoteHash := s256.Sum(nil)

		// If the hashes are equal then we can ignore this advisory.
		if bytes.Equal(localHash, remoteHash) {
			continue
		}

		// Renamed from 'errors' to not shadow the errors package.
		issues, err := csaf.ValidateCSAF(doc)
		if err != nil {
			return nil, fmt.Errorf("failed to validate %s: %v", url, err)
		}
		// XXX: Should we return an error here?
		for _, issue := range issues {
			log.Printf("validation error: %s: %v\n", url, issue)
		}

		// We need to write the changed content.
		// This will start the transaction if not already started.
		dst, err := tx.Dst()
		if err != nil {
			return nil, err
		}
		// Overwrite in the cloned folder.
		nlocal := filepath.Join(dst, label, interim[0])

		// Renamed from 'bytes' to not shadow the bytes package.
		content := data.Bytes()
		if err := os.WriteFile(nlocal, content, 0644); err != nil {
			return nil, err
		}

		name := filepath.Base(nlocal)
		if err := util.WriteHashToFile(
			nlocal+".sha512", name, sha512.New(), content,
		); err != nil {
			return nil, err
		}
		if err := util.WriteHashSumToFile(
			nlocal+".sha256", name, remoteHash,
		); err != nil {
			return nil, err
		}

		// Download the signature or sign it ourselves.
		sigURL := url + ".asc"
		ascFile := nlocal + ".asc"
		if err := w.downloadSignatureOrSign(sigURL, ascFile, content); err != nil {
			return nil, err
		}
	}
	return finalized, nil
}
// setupProviderInterim prepares the worker for a specific provider.
func (w *worker) setupProviderInterim(p *provider) {
	log.Printf("worker #%d: %s (%s)\n", w.num, p.Name, p.Domain)
	w.dir = ""
	w.provider = p
	// Every job gets its own client.
	w.client = w.cfg.httpClient(p)
}
// interimWork is the worker loop of an interim run. For every queued
// job it checks the stored interim advisories of the provider (per
// TLP label) against their remote versions and updates the local
// copies inside a lazy transaction. Errors are stored per job.
func (w *worker) interimWork(wg *sync.WaitGroup, jobs <-chan *interimJob) {
	defer wg.Done()
	path := filepath.Join(w.cfg.Web, ".well-known", "csaf-aggregator")

	for j := range jobs {
		w.setupProviderInterim(j.provider)

		providerPath := filepath.Join(path, j.provider.Name)

		j.err = func() error {
			tx := newLazyTransaction(providerPath, w.cfg.Folder)
			// rollback is a no-op after a successful commit.
			defer tx.rollback()

			// Try all the labels.
			for _, label := range []string{
				csaf.TLPLabelUnlabeled,
				csaf.TLPLabelWhite,
				csaf.TLPLabelGreen,
				csaf.TLPLabelAmber,
				csaf.TLPLabelRed,
			} {
				label = strings.ToLower(label)
				labelPath := filepath.Join(providerPath, label)

				interimsCSV := filepath.Join(labelPath, "interims.csv")
				interims, err := readInterims(
					interimsCSV, w.cfg.InterimYears)
				if err != nil {
					return err
				}

				// no interims found -> next label.
				if len(interims) == 0 {
					continue
				}

				// Compare locals against remotes.
				finalized, err := w.checkInterims(tx, label, interims)
				if err != nil {
					return err
				}

				if len(finalized) > 0 {
					// We want to write in the transaction folder.
					dst, err := tx.Dst()
					if err != nil {
						return err
					}
					interimsCSV := filepath.Join(dst, label, "interims.csv")
					if err := writeInterims(interimsCSV, finalized); err != nil {
						return err
					}
				}
			}
			return tx.commit()
		}()
	}
}
// joinErrors creates an aggregated error of the messages
// of the given errors.
func joinErrors(errs []error) error {
if len(errs) == 0 {
return nil
}
var b strings.Builder
for i, err := range errs {
if i > 0 {
b.WriteString(", ")
}
b.WriteString(err.Error())
}
return errors.New(b.String())
}
// interim performs the short interim check/update over all providers
// with a pool of workers. It only works in mirror mode.
func (p *processor) interim() error {
	if !p.cfg.runAsMirror() {
		// The message used to read "iterim".
		return errors.New("interim in lister mode does not work")
	}
	queue := make(chan *interimJob)
	var wg sync.WaitGroup

	log.Printf("Starting %d workers.\n", p.cfg.Workers)
	for i := 1; i <= p.cfg.Workers; i++ {
		wg.Add(1)
		w := newWorker(i, p.cfg)
		go w.interimWork(&wg, queue)
	}
	jobs := make([]interimJob, len(p.cfg.Providers))

	// (Loop variable renamed; it used to shadow the receiver p.)
	for i, prov := range p.cfg.Providers {
		jobs[i] = interimJob{provider: prov}
		queue <- &jobs[i]
	}
	close(queue)
	wg.Wait()

	// Collect the per-job errors into one.
	var errs []error
	for i := range jobs {
		if err := jobs[i].err; err != nil {
			errs = append(errs, err)
		}
	}
	return joinErrors(errs)
}
func writeInterims(interimsCSV string, finalized []string) error {
// In case this is a longer list (unlikely).
removed := make(map[string]bool, len(finalized))
for _, f := range finalized {
removed[f] = true
}
lines, err := func() ([][]string, error) {
interimsF, err := os.Open(interimsCSV)
if err != nil {
return nil, err
}
defer interimsF.Close()
c := csv.NewReader(interimsF)
c.FieldsPerRecord = 3
var lines [][]string
for {
record, err := c.Read()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
// If not finalized it survives
if !removed[record[1]] {
lines = append(lines, record)
}
}
return lines, nil
}()
if err != nil {
return err
}
// All interims are finalized now -> remove file.
if len(lines) == 0 {
return os.RemoveAll(interimsCSV)
}
// Overwrite old. It's save because we are in a transaction.
f, err := os.Create(interimsCSV)
if err != nil {
return err
}
c := csv.NewWriter(f)
if err := c.WriteAll(lines); err != nil {
return f.Close()
}
c.Flush()
err1 := c.Error()
err2 := f.Close()
if err1 != nil {
return err1
}
return err2
}
// readInterims scans a interims.csv file for matching
// iterim advisories. Its sorted with youngest
// first, so we can stop scanning if entries get too old.
func readInterims(interimsCSV string, years int) ([][2]string, error) {
var tooOld func(time.Time) bool
if years <= 0 {
tooOld = func(time.Time) bool { return false }
} else {
from := time.Now().AddDate(-years, 0, 0)
tooOld = func(t time.Time) bool { return t.Before(from) }
}
interimsF, err := os.Open(interimsCSV)
if err != nil {
// None existing file -> no interims.
if os.IsNotExist(err) {
return nil, nil
}
return nil, err
}
defer interimsF.Close()
c := csv.NewReader(interimsF)
c.FieldsPerRecord = 3
var files [][2]string
for {
record, err := c.Read()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
t, err := time.Parse(time.RFC3339, record[0])
if err != nil {
return nil, err
}
if tooOld(t) {
break
}
files = append(files, [2]string{record[1], record[2]})
}
return files, nil
}

View file

@ -0,0 +1,86 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"os"
"path/filepath"
"github.com/csaf-poc/csaf_distribution/util"
)
// lazyTransaction is a copy-on-first-write view of a directory:
// Dst clones src below dstDir only when a modification is needed.
type lazyTransaction struct {
	src    string // The source directory.
	dstDir string // The directory to place the working copy in.
	dst    string // The working copy; empty until Dst was called.
}

// newLazyTransaction returns a transaction over src whose working
// copy will be created below dstDir on demand.
func newLazyTransaction(src, dstDir string) *lazyTransaction {
	return &lazyTransaction{
		src:    src,
		dstDir: dstDir,
	}
}

// Src returns the source directory of the transaction.
func (lt *lazyTransaction) Src() string {
	return lt.src
}
// Dst returns the working copy of the transaction, creating it and
// copying the current content of src into it on first use.
func (lt *lazyTransaction) Dst() (string, error) {
	if lt.dst != "" {
		return lt.dst, nil
	}
	srcBase := filepath.Base(lt.src)
	folder := filepath.Join(lt.dstDir, srcBase)
	dst, err := util.MakeUniqDir(folder)
	if err != nil {
		return "", err
	}
	// Copy old content into new.
	// Fix: copy into the freshly created dst; lt.dst was still
	// empty at this point.
	if err := util.DeepCopy(dst, lt.src); err != nil {
		os.RemoveAll(dst)
		return "", err
	}
	lt.dst = dst
	return dst, nil
}
// rollback discards the working copy, if one was created.
func (lt *lazyTransaction) rollback() error {
	if lt.dst == "" {
		// Nothing was written -> nothing to undo.
		return nil
	}
	dst := lt.dst
	lt.dst = ""
	return os.RemoveAll(dst)
}
// commit is intended to switch the source directory over to the
// working copy via a symlink rename. A transaction without a working
// copy commits as a no-op.
func (lt *lazyTransaction) commit() error {
	if lt.dst == "" {
		return nil
	}
	defer func() { lt.dst = "" }()
	// Switch directories.
	// NOTE(review): the symlink target and the cleanup calls below
	// use lt.dstDir; it looks like they should use lt.dst (the
	// working copy created by Dst) instead -- confirm. Also confirm
	// that renaming over the existing lt.src directory succeeds.
	symlink := filepath.Join(lt.dstDir, filepath.Base(lt.src))
	if err := os.Symlink(lt.dstDir, symlink); err != nil {
		os.RemoveAll(lt.dstDir)
		return err
	}
	if err := os.Rename(symlink, lt.src); err != nil {
		os.RemoveAll(lt.dstDir)
		return err
	}
	return os.RemoveAll(lt.src)
}

View file

@ -0,0 +1,34 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"fmt"
"github.com/csaf-poc/csaf_distribution/csaf"
"github.com/csaf-poc/csaf_distribution/util"
)
// listAllowed checks if the provider opted into being listed by
// CSAF aggregators ($.list_on_CSAF_aggregators in its metadata).
// (The previous comment wrongly referred to mirroring.)
func (w *worker) listAllowed() bool {
	var b bool
	return w.expr.Extract(
		`$.list_on_CSAF_aggregators`,
		util.BoolMatcher(&b), false, w.metadataProvider) == nil && b
}
// lister creates the aggregator entry for a provider which is only
// listed, not mirrored.
func (w *worker) lister() (*csaf.AggregatorCSAFProvider, error) {
	// Check if we are allowed to list this domain.
	if !w.listAllowed() {
		return nil, fmt.Errorf(
			"no listing of '%s' allowed", w.provider.Name)
	}
	return w.createAggregatorProvider()
}

View file

@ -0,0 +1,76 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"fmt"
"log"
"os"
"github.com/csaf-poc/csaf_distribution/util"
"github.com/gofrs/flock"
"github.com/jessevdk/go-flags"
)
// options are the command line options of the aggregator.
type options struct {
	// Config is the file name of the TOML configuration.
	Config string `short:"c" long:"config" description:"File name of the configuration file" value-name:"CFG-FILE" default:"aggregator.toml"`
	// Version requests printing the version and exiting.
	Version bool `long:"version" description:"Display version of the binary"`
	// Interim switches to an interim scan instead of a full run.
	Interim bool `short:"i" long:"interim" description:"Perform an interim scan"`
}
// errCheck terminates the program on error. A help request from
// the flags parser exits successfully instead.
func errCheck(err error) {
	if err == nil {
		return
	}
	if fe, ok := err.(*flags.Error); ok && fe.Type == flags.ErrHelp {
		os.Exit(0)
	}
	log.Fatalf("error: %v\n", err)
}
// lock runs fn while holding the file lock named by lockFile.
// A nil lockFile means no locking is configured; fn runs directly.
func lock(lockFile *string, fn func() error) error {
	if lockFile == nil {
		// No locking configured.
		return fn()
	}
	fl := flock.New(*lockFile)
	switch locked, err := fl.TryLock(); {
	case err != nil:
		return fmt.Errorf("file locking failed: %v", err)
	case !locked:
		return fmt.Errorf("cannot lock to file %s", *lockFile)
	}
	defer fl.Unlock()
	return fn()
}
// main parses the command line, loads and validates the configuration
// and runs the aggregator, optionally under a file lock.
func main() {
	opts := new(options)

	_, err := flags.Parse(opts)
	errCheck(err)
	if opts.Version {
		fmt.Println(util.SemVersion)
		return
	}
	interim := opts.Interim

	cfg, err := loadConfig(opts.Config)
	errCheck(err)

	// The command line flag overrides the config file setting.
	if interim {
		cfg.Interim = true
	}

	p := processor{cfg: cfg}
	errCheck(lock(cfg.LockFile, p.process))
}

View file

@ -0,0 +1,516 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"bytes"
"crypto/sha256"
"crypto/sha512"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/ProtonMail/gopenpgp/v2/crypto"
"github.com/csaf-poc/csaf_distribution/csaf"
"github.com/csaf-poc/csaf_distribution/util"
)
// handleROLIE resolves all ROLIE feeds advertised in the provider
// metadata, downloads each feed and hands the contained document
// URLs (with the feed's TLP label) to process. Broken feeds are
// logged and skipped; only process errors abort the run.
func (w *worker) handleROLIE(
	rolie interface{},
	process func(*csaf.TLPLabel, []string) error,
) error {
	base, err := url.Parse(w.loc)
	if err != nil {
		return err
	}
	var feeds [][]csaf.Feed
	if err := util.ReMarshalJSON(&feeds, rolie); err != nil {
		return err
	}
	log.Printf("Found %d ROLIE feed(s).\n", len(feeds))

	for _, fs := range feeds {
		for i := range fs {
			feed := &fs[i]
			if feed.URL == nil {
				continue
			}
			// Feed URLs may be relative to the metadata location.
			up, err := url.Parse(string(*feed.URL))
			if err != nil {
				log.Printf("Invalid URL %s in feed: %v.", *feed.URL, err)
				continue
			}
			feedURL := base.ResolveReference(up).String()
			log.Printf("Feed URL: %s\n", feedURL)

			fb, err := util.BaseURL(feedURL)
			if err != nil {
				log.Printf("error: Invalid feed base URL '%s': %v\n", fb, err)
				continue
			}
			feedBaseURL, err := url.Parse(fb)
			if err != nil {
				log.Printf("error: Cannot parse feed base URL '%s': %v\n", fb, err)
				continue
			}

			res, err := w.client.Get(feedURL)
			if err != nil {
				log.Printf("error: Cannot get feed '%s'\n", err)
				continue
			}
			if res.StatusCode != http.StatusOK {
				// The body leaked here before; close it explicitly.
				res.Body.Close()
				log.Printf("error: Fetching %s failed. Status code %d (%s)",
					feedURL, res.StatusCode, res.Status)
				continue
			}
			rfeed, err := func() (*csaf.ROLIEFeed, error) {
				defer res.Body.Close()
				return csaf.LoadROLIEFeed(res.Body)
			}()
			if err != nil {
				log.Printf("Loading ROLIE feed failed: %v.", err)
				continue
			}
			// Entries may be relative to the feed location.
			files := resolveURLs(rfeed.Files(), feedBaseURL)
			if err := process(feed.TLPLabel, files); err != nil {
				return err
			}
		}
	}
	return nil
}
// mirrorAllowed checks if the provider opted into being mirrored by
// CSAF aggregators ($.mirror_on_CSAF_aggregators in its metadata).
func (w *worker) mirrorAllowed() bool {
	var b bool
	return w.expr.Extract(
		`$.mirror_on_CSAF_aggregators`,
		util.BoolMatcher(&b), false, w.metadataProvider) == nil && b
}
// mirror mirrors a single provider and removes the download
// directory again if anything went wrong.
func (w *worker) mirror() (*csaf.AggregatorCSAFProvider, error) {
	result, err := w.mirrorInternal()
	if err == nil || w.dir == "" {
		return result, err
	}
	// Something went wrong: remove the debris.
	if rmErr := os.RemoveAll(w.dir); rmErr != nil {
		log.Printf("error: %v\n", rmErr)
	}
	return result, err
}
// mirrorInternal does the real work of mirroring one provider: it
// checks the permission, downloads the advisories (from ROLIE feeds
// or index.txt), writes the indices, commits the download transaction
// and builds the aggregator entry with this mirror registered.
func (w *worker) mirrorInternal() (*csaf.AggregatorCSAFProvider, error) {

	// Check if we are allowed to mirror this domain.
	if !w.mirrorAllowed() {
		return nil, fmt.Errorf(
			"no mirroring of '%s' allowed", w.provider.Name)
	}

	// Collecting the summaries of the advisories.
	w.summaries = make(map[string][]summary)

	// Check if we have ROLIE feeds.
	rolie, err := w.expr.Eval(
		"$.distributions[*].rolie.feeds", w.metadataProvider)
	if err != nil {
		log.Printf("rolie check failed: %v\n", err)
		return nil, err
	}

	fs, hasRolie := rolie.([]interface{})
	hasRolie = hasRolie && len(fs) > 0

	if hasRolie {
		if err := w.handleROLIE(rolie, w.mirrorFiles); err != nil {
			return nil, err
		}
	} else {
		// No rolie feeds -> try to load files from index.txt
		baseURL, err := util.BaseURL(w.loc)
		if err != nil {
			return nil, err
		}
		// (Removed a leftover '_ = files'; files is used right below.)
		files, err := w.loadIndex(baseURL)
		if err != nil {
			return nil, err
		}
		// XXX: Is treating as white okay? better look into the advisories?
		white := csaf.TLPLabel(csaf.TLPLabelWhite)
		if err := w.mirrorFiles(&white, files); err != nil {
			return nil, err
		}
	} // TODO: else scan directories?

	if err := w.writeIndices(); err != nil {
		return nil, err
	}

	if err := w.doMirrorTransaction(); err != nil {
		return nil, err
	}

	if err := w.writeProviderMetadata(); err != nil {
		return nil, err
	}

	acp, err := w.createAggregatorProvider()
	if err != nil {
		return nil, err
	}

	// Add us as a mirror.
	mirrorURL := csaf.ProviderURL(
		fmt.Sprintf("%s/.well-known/csaf-aggregator/%s/provider-metadata.json",
			w.cfg.Domain, w.provider.Name))

	acp.Mirrors = []csaf.ProviderURL{
		mirrorURL,
	}
	return acp, err
}
// labelsFromSummaries returns the TLP labels of the collected
// summaries in ascending order.
func (w *worker) labelsFromSummaries() []csaf.TLPLabel {
	keys := make([]string, 0, len(w.summaries))
	for k := range w.summaries {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	labels := make([]csaf.TLPLabel, len(keys))
	for i, k := range keys {
		labels[i] = csaf.TLPLabel(k)
	}
	return labels
}
// writeProviderMetadata writes a local provider-metadata.json for a
// mirror into the working directory. Publisher, last update time and
// OpenPGP keys are taken over from the original provider metadata;
// failures while extracting them are only logged, not fatal.
func (w *worker) writeProviderMetadata() error {

	fname := filepath.Join(w.dir, "provider-metadata.json")

	pm := csaf.NewProviderMetadataDomain(
		w.cfg.Domain,
		w.labelsFromSummaries())

	// Figure out the role: a provider configured with a full
	// "https://" URL is treated as a publisher.
	var role csaf.MetadataRole

	if strings.HasPrefix(w.provider.Domain, "https://") {
		role = csaf.MetadataRolePublisher
	} else {
		role = csaf.MetadataRoleProvider
	}

	pm.Role = &role

	pm.Publisher = new(csaf.Publisher)

	var lastUpdate time.Time

	if err := w.expr.Match([]util.PathEvalMatcher{
		{Expr: `$.publisher`, Action: util.ReMarshalMatcher(pm.Publisher)},
		{Expr: `$.last_updated`, Action: util.TimeMatcher(&lastUpdate, time.RFC3339)},
		{Expr: `$.public_openpgp_keys`, Action: util.ReMarshalMatcher(&pm.PGPKeys)},
	}, w.metadataProvider); err != nil {
		// only log the errors
		log.Printf("extracting data from original provider failed: %v\n", err)
	}

	// A key loading problem is only logged; the metadata is written anyway.
	key, err := w.cfg.cryptoKey()
	if err != nil {
		log.Printf("error: %v\n", err)
	}

	if key != nil {
		pm.SetPGP(key.GetFingerprint(), w.cfg.GetOpenPGPURL(key))
	}

	la := csaf.TimeStamp(lastUpdate)
	pm.LastUpdated = &la

	return util.WriteToFile(fname, pm)
}
// createAggregatorProvider creates the "metadata" section of an entry
// in the "csaf_providers" list of the aggregator document. It extracts
// last update time, publisher, role and canonical URL from the original
// provider metadata of the current provider.
func (w *worker) createAggregatorProvider() (*csaf.AggregatorCSAFProvider, error) {
	const (
		lastUpdatedExpr = `$.last_updated`
		publisherExpr   = `$.publisher`
		roleExpr        = `$.role`
		urlExpr         = `$.canonical_url`
	)
	var (
		lastUpdatedT time.Time
		pub          csaf.Publisher
		roleS        string
		urlS         string
	)
	// All four values must be extractable; a single failure aborts.
	if err := w.expr.Match([]util.PathEvalMatcher{
		{Expr: lastUpdatedExpr, Action: util.TimeMatcher(&lastUpdatedT, time.RFC3339)},
		{Expr: publisherExpr, Action: util.ReMarshalMatcher(&pub)},
		{Expr: roleExpr, Action: util.StringMatcher(&roleS)},
		{Expr: urlExpr, Action: util.StringMatcher(&urlS)},
	}, w.metadataProvider); err != nil {
		return nil, err
	}
	// Convert the raw values into their CSAF typed counterparts.
	var (
		lastUpdated = csaf.TimeStamp(lastUpdatedT)
		role        = csaf.MetadataRole(roleS)
		url         = csaf.ProviderURL(urlS)
	)
	return &csaf.AggregatorCSAFProvider{
		Metadata: &csaf.AggregatorCSAFProviderMetadata{
			LastUpdated: &lastUpdated,
			Publisher:   &pub,
			Role:        &role,
			URL:         &url,
		},
	}, nil
}
// doMirrorTransaction performs an atomic directory swap: the freshly
// filled working directory is linked below the configured folder and
// that symlink is renamed into the web tree, replacing a previous
// mirror. The directory the old symlink pointed to is removed last.
// On any failure the working directory is removed as well.
func (w *worker) doMirrorTransaction() error {

	webTarget := filepath.Join(
		w.cfg.Web, ".well-known", "csaf-aggregator", w.provider.Name)

	// cleanup removes the working directory and passes err through.
	// Matching the previous behavior, a failing removal is ignored.
	cleanup := func(err error) error {
		os.RemoveAll(w.dir)
		return err
	}

	var oldWeb string

	// Resolve the old target to be removed later.
	if _, err := os.Stat(webTarget); err != nil {
		if !os.IsNotExist(err) {
			return cleanup(err)
		}
	} else {
		if oldWeb, err = filepath.EvalSymlinks(webTarget); err != nil {
			return cleanup(err)
		}
	}

	// Check if there is a symlink already.
	target := filepath.Join(w.cfg.Folder, w.provider.Name)
	log.Printf("target: '%s'\n", target)

	exists, err := util.PathExists(target)
	if err != nil {
		return cleanup(err)
	}

	if exists {
		if err := os.RemoveAll(target); err != nil {
			return cleanup(err)
		}
	}

	log.Printf("sym link: %s -> %s\n", w.dir, target)

	// Create a new symlink
	if err := os.Symlink(w.dir, target); err != nil {
		return cleanup(err)
	}

	// Move the symlink
	log.Printf("Move: %s -> %s\n", target, webTarget)
	if err := os.Rename(target, webTarget); err != nil {
		return cleanup(err)
	}

	// Finally remove the old folder.
	if oldWeb != "" {
		return os.RemoveAll(oldWeb)
	}
	return nil
}
// downloadSignature downloads an OpenPGP signature from a given url.
// It returns errNotFound if the server does not answer with status 200
// and an error if the payload is not a valid armored PGP message.
//
// Fix: the response body is now always closed; previously it leaked
// when the status code was not 200 OK.
func (w *worker) downloadSignature(path string) (string, error) {

	res, err := w.client.Get(path)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		return "", errNotFound
	}

	data, err := io.ReadAll(res.Body)
	if err != nil {
		return "", err
	}

	result := string(data)

	// Validate that we really downloaded an armored signature.
	if _, err := crypto.NewPGPMessageFromArmored(result); err != nil {
		return "", err
	}

	return result, nil
}
// sign signs the given data with the configured key.
// The signing key ring is built lazily on first use; if no key is
// configured an empty signature and no error are returned.
func (w *worker) sign(data []byte) (string, error) {
	if w.signRing == nil {
		key, err := w.cfg.cryptoKey()
		if err != nil {
			return "", err
		}
		if key == nil {
			return "", nil
		}
		if pp := w.cfg.Passphrase; pp != nil {
			unlocked, err := key.Unlock([]byte(*pp))
			if err != nil {
				return "", err
			}
			key = unlocked
		}
		ring, err := crypto.NewKeyRing(key)
		if err != nil {
			return "", err
		}
		w.signRing = ring
	}
	signature, err := w.signRing.SignDetached(crypto.NewPlainMessage(data))
	if err != nil {
		return "", err
	}
	return signature.GetArmored()
}
// mirrorFiles downloads, validates and stores the given advisory files
// below a directory named after the lowercased TLP label and the
// advisory's initial release year. SHA256/SHA512 hashes are computed
// while downloading and written alongside each file, and a signature is
// downloaded or created for it. Per-file problems are logged and the
// file is skipped; only local filesystem errors abort the whole run.
func (w *worker) mirrorFiles(tlpLabel *csaf.TLPLabel, files []string) error {
	label := "unknown"
	if tlpLabel != nil {
		label = strings.ToLower(string(*tlpLabel))
	}
	summaries := w.summaries[label]
	dir, err := w.createDir()
	if err != nil {
		return err
	}
	var content bytes.Buffer
	// Year directories already created, keyed by release year.
	yearDirs := make(map[int]string)
	for _, file := range files {
		u, err := url.Parse(file)
		if err != nil {
			log.Printf("error: %s\n", err)
			continue
		}
		filename := util.CleanFileName(filepath.Base(u.Path))
		var advisory interface{}
		// Hash the raw bytes and buffer them while decoding the
		// JSON in a single pass over the stream.
		s256 := sha256.New()
		s512 := sha512.New()
		content.Reset()
		hasher := io.MultiWriter(s256, s512, &content)
		download := func(r io.Reader) error {
			tee := io.TeeReader(r, hasher)
			return json.NewDecoder(tee).Decode(&advisory)
		}
		if err := downloadJSON(w.client, file, download); err != nil {
			log.Printf("error: %v\n", err)
			continue
		}
		// Validate against the CSAF schema.
		// NOTE(review): this local 'errors' shadows the stdlib errors
		// package inside the loop body — consider renaming.
		errors, err := csaf.ValidateCSAF(advisory)
		if err != nil {
			log.Printf("error: %s: %v", file, err)
			continue
		}
		if len(errors) > 0 {
			log.Printf("CSAF file %s has %d validation errors.",
				file, len(errors))
			continue
		}
		sum, err := csaf.NewAdvisorySummary(w.expr, advisory)
		if err != nil {
			log.Printf("error: %s: %v\n", file, err)
			continue
		}
		summaries = append(summaries, summary{
			filename: filename,
			summary:  sum,
			url:      file,
		})
		// Lazily create the <label>/<year> directory.
		year := sum.InitialReleaseDate.Year()
		yearDir := yearDirs[year]
		if yearDir == "" {
			yearDir = filepath.Join(dir, label, strconv.Itoa(year))
			if err := os.MkdirAll(yearDir, 0755); err != nil {
				return err
			}
			//log.Printf("created %s\n", yearDir)
			yearDirs[year] = yearDir
		}
		fname := filepath.Join(yearDir, filename)
		//log.Printf("write: %s\n", fname)
		data := content.Bytes()
		// Write the advisory plus its SHA256/SHA512 hash files.
		if err := writeFileHashes(
			fname, filename,
			data, s256.Sum(nil), s512.Sum(nil),
		); err != nil {
			return err
		}
		// Try to fetch signature file.
		sigURL := file + ".asc"
		ascFile := fname + ".asc"
		if err := w.downloadSignatureOrSign(sigURL, ascFile, data); err != nil {
			return err
		}
	}
	w.summaries[label] = summaries
	return nil
}
// downloadSignatureOrSign first tries to download a signature from url.
// If that fails it signs the data itself with the configured key.
// The resulting signature, if any, is written to fname.
func (w *worker) downloadSignatureOrSign(url, fname string, data []byte) error {
	sig, err := w.downloadSignature(url)
	if err != nil {
		if err != errNotFound {
			log.Printf("error: %s: %v\n", url, err)
		}
		// Sign it ourselves.
		sig, err = w.sign(data)
		if err != nil {
			return err
		}
	}
	// No key configured -> nothing to write.
	if sig == "" {
		return nil
	}
	return os.WriteFile(fname, []byte(sig), 0644)
}

View file

@ -0,0 +1,255 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"encoding/json"
"errors"
"io"
"log"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/ProtonMail/gopenpgp/v2/crypto"
"github.com/csaf-poc/csaf_distribution/csaf"
"github.com/csaf-poc/csaf_distribution/util"
)
// processor is the top-level driver that runs the aggregation
// over all providers of the configuration.
type processor struct {
	cfg *config
}

// summary holds the metadata of a single mirrored advisory.
type summary struct {
	filename string                // cleaned base name of the advisory file
	summary  *csaf.AdvisorySummary // extracted summary of the advisory
	url      string                // original download URL of the advisory
}

// worker is the per-provider working state of the aggregator.
type worker struct {
	num      int            // number of the worker
	expr     *util.PathEval // JSON path evaluator, reused per worker
	cfg      *config        // the aggregator configuration
	signRing *crypto.KeyRing // lazily built signing key ring

	client           client      // client per provider
	provider         *provider   // current provider
	metadataProvider interface{} // current metadata provider
	loc              string      // URL of current provider-metadata.json
	dir              string      // Directory to store data to.

	summaries map[string][]summary // the summaries of the advisories.
}
// newWorker creates a worker with the given number and configuration.
func newWorker(num int, config *config) *worker {
	w := worker{
		num:  num,
		cfg:  config,
		expr: util.NewPathEval(),
	}
	return &w
}
// ensureDir creates the directory path (including parents, mode 0750)
// if it does not exist yet. An existing path is left untouched.
func ensureDir(path string) error {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return os.MkdirAll(path, 0750)
		}
		return err
	}
	return nil
}
// createDir lazily creates a unique working directory for the
// current provider, caching it in w.dir for subsequent calls.
func (w *worker) createDir() (string, error) {
	if w.dir != "" {
		return w.dir, nil
	}
	dir, err := util.MakeUniqDir(
		filepath.Join(w.cfg.Folder, w.provider.Name))
	if err != nil {
		return dir, err
	}
	w.dir = dir
	return dir, nil
}
// httpsDomain prefixes a domain with 'https://' unless the
// domain already carries that scheme.
func httpsDomain(domain string) string {
	const scheme = "https://"
	if strings.HasPrefix(domain, scheme) {
		return domain
	}
	return scheme + domain
}
// providerMetadataLocations are the well-known relative locations
// (in probing order) below a provider's domain where the provider
// metadata is searched by locateProviderMetadata.
var providerMetadataLocations = [...]string{
	".well-known/csaf",
	"security/data/csaf",
	"advisories/csaf",
	"security/csaf",
}
// locateProviderMetadata tries to find the provider metadata of the
// given domain. It first probes the well-known locations and falls
// back to the CSAF entries of the domain's security.txt. The decoded
// document is stored in w.metadataProvider and its location in w.loc.
//
// Fix: the error message wrongly said "secturity.txt".
func (w *worker) locateProviderMetadata(domain string) error {

	w.metadataProvider = nil

	// download decodes the body into w.metadataProvider; a broken
	// document is logged and treated like a missing one.
	download := func(r io.Reader) error {
		if err := json.NewDecoder(r).Decode(&w.metadataProvider); err != nil {
			log.Printf("error: %s\n", err)
			return errNotFound
		}
		return nil
	}

	hd := httpsDomain(domain)

	for _, loc := range providerMetadataLocations {
		url := hd + "/" + loc
		if err := downloadJSON(w.client, url, download); err != nil {
			if err == errNotFound {
				continue
			}
			return err
		}
		if w.metadataProvider != nil {
			// NOTE(review): the relative location is stored here while
			// the security.txt branch below stores an absolute URL —
			// confirm downstream users of w.loc handle both forms.
			w.loc = loc
			return nil
		}
	}

	// Read from security.txt
	path := hd + "/.well-known/security.txt"
	res, err := w.client.Get(path)
	if err != nil {
		return err
	}
	if res.StatusCode != http.StatusOK {
		return errNotFound
	}

	if err := func() error {
		defer res.Body.Close()
		urls, err := csaf.ExtractProviderURL(res.Body, false)
		if err != nil {
			return err
		}
		if len(urls) == 0 {
			return errors.New("no provider-metadata.json found in security.txt")
		}
		w.loc = urls[0]
		return nil
	}(); err != nil {
		return err
	}

	return downloadJSON(w.client, w.loc, download)
}
// removeOrphans removes the symlinks in the web tree that do not
// belong to any provider in the current configuration, and also
// deletes their backing directories when those live directly below
// the configured folder. Per-entry problems are only logged.
func (p *processor) removeOrphans() error {
	keep := make(map[string]bool)
	for _, p := range p.cfg.Providers {
		keep[p.Name] = true
	}
	path := filepath.Join(p.cfg.Web, ".well-known", "csaf-aggregator")
	// Read all directory entries of the web tree.
	entries, err := func() ([]os.DirEntry, error) {
		dir, err := os.Open(path)
		if err != nil {
			return nil, err
		}
		defer dir.Close()
		return dir.ReadDir(-1)
	}()
	if err != nil {
		return err
	}
	// Resolve the folder to an absolute, symlink-free prefix so only
	// directories that really live below it are ever deleted.
	prefix, err := filepath.Abs(p.cfg.Folder)
	if err != nil {
		return err
	}
	prefix, err = filepath.EvalSymlinks(prefix)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		if keep[entry.Name()] {
			continue
		}
		fi, err := entry.Info()
		if err != nil {
			log.Printf("error: %v\n", err)
			continue
		}
		// only remove the symlinks
		if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
			continue
		}
		d := filepath.Join(path, entry.Name())
		r, err := filepath.EvalSymlinks(d)
		if err != nil {
			log.Printf("error: %v\n", err)
			continue
		}
		fd, err := os.Stat(r)
		if err != nil {
			log.Printf("error: %v\n", err)
			continue
		}
		// If it's not a directory it's not a mirror.
		if !fd.IsDir() {
			continue
		}
		// Remove the link.
		log.Printf("removing link %s -> %s\n", d, r)
		if err := os.Remove(d); err != nil {
			log.Printf("error: %v\n", err)
			continue
		}
		// Only remove directories which are in our folder.
		if rel, err := filepath.Rel(prefix, r); err == nil &&
			rel == filepath.Base(r) {
			log.Printf("removing directory %s\n", r)
			if err := os.RemoveAll(r); err != nil {
				log.Printf("error: %v\n", err)
			}
		}
	}
	return nil
}
// process is the main driver of the jobs handled by work.
// It prepares the folder and web directories, removes orphaned
// mirrors and then runs either an interim or a full aggregation.
func (p *processor) process() error {
	web := filepath.Join(p.cfg.Web, ".well-known", "csaf-aggregator")
	for _, dir := range []string{p.cfg.Folder, web} {
		if err := ensureDir(dir); err != nil {
			return err
		}
	}
	if err := p.removeOrphans(); err != nil {
		return err
	}
	if p.cfg.Interim {
		return p.interim()
	}
	return p.full()
}

View file

@ -0,0 +1,28 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"log"
"net/url"
)
// resolveURLs resolves a list of URLs urls against a base URL base.
func resolveURLs(urls []string, base *url.URL) []string {
out := make([]string, 0, len(urls))
for _, u := range urls {
p, err := url.Parse(u)
if err != nil {
log.Printf("error: Invalid URL '%s': %v\n", u, err)
continue
}
out = append(out, base.ResolveReference(p).String())
}
return out
}