1
0
Fork 0
mirror of https://github.com/gocsaf/csaf.git synced 2025-12-22 18:15:42 +01:00

Merge branch 'main' into rolie-categories

This commit is contained in:
Sascha L. Teichmann 2022-06-24 11:07:05 +02:00
commit e25fe66ee8
35 changed files with 1366 additions and 312 deletions

View file

@ -26,6 +26,7 @@ jobs:
./TLSClientConfigsForITest.sh ./TLSClientConfigsForITest.sh
./setupProviderForITest.sh ./setupProviderForITest.sh
./testAggregator.sh ./testAggregator.sh
./testDownloader.sh
shell: bash shell: bash
- name: Upload test results - name: Upload test results

View file

@ -21,3 +21,4 @@
| github.com/gofrs/flock | BSD-3-Clause | | github.com/gofrs/flock | BSD-3-Clause |
| github.com/PuerkitoBio/goquery | BSD-3-Clause | | github.com/PuerkitoBio/goquery | BSD-3-Clause |
| github.com/andybalholm/cascadia | BSD-2-Clause | | github.com/andybalholm/cascadia | BSD-2-Clause |
| go.etcd.io/bbolt | MIT |

View file

@ -76,9 +76,9 @@ dist: build_linux build_win
mkdir -p dist mkdir -p dist
mkdir -p dist/$(DISTDIR)-windows-amd64/bin-windows-amd64 mkdir -p dist/$(DISTDIR)-windows-amd64/bin-windows-amd64
cp README.md dist/$(DISTDIR)-windows-amd64 cp README.md dist/$(DISTDIR)-windows-amd64
cp bin-windows-amd64/csaf_uploader.exe bin-windows-amd64/csaf_checker.exe dist/$(DISTDIR)-windows-amd64/bin-windows-amd64/ cp bin-windows-amd64/csaf_uploader.exe bin-windows-amd64/csaf_checker.exe bin-windows-amd64/csaf_downloader.exe dist/$(DISTDIR)-windows-amd64/bin-windows-amd64/
mkdir -p dist/$(DISTDIR)-windows-amd64/docs mkdir -p dist/$(DISTDIR)-windows-amd64/docs
cp docs/csaf_uploader.md docs/csaf_checker.md dist/$(DISTDIR)-windows-amd64/docs cp docs/csaf_uploader.md docs/csaf_checker.md docs/csaf_downloader.md dist/$(DISTDIR)-windows-amd64/docs
mkdir dist/$(DISTDIR)-gnulinux-amd64 mkdir dist/$(DISTDIR)-gnulinux-amd64
cp -r README.md docs bin-linux-amd64 dist/$(DISTDIR)-gnulinux-amd64 cp -r README.md docs bin-linux-amd64 dist/$(DISTDIR)-gnulinux-amd64
cd dist/ ; zip -r $(DISTDIR)-windows-amd64.zip $(DISTDIR)-windows-amd64/ cd dist/ ; zip -r $(DISTDIR)-windows-amd64.zip $(DISTDIR)-windows-amd64/

View file

@ -1,8 +1,8 @@
# csaf_distribution # csaf_distribution
An implementation of a [CSAF 2.0](https://docs.oasis-open.org/csaf/csaf/v2.0/csd02/csaf-v2.0-csd02.html) trusted provider, checker and aggregator. Includes an uploader command line tool for the trusted provider. An implementation of a [CSAF 2.0](https://docs.oasis-open.org/csaf/csaf/v2.0/csd02/csaf-v2.0-csd02.html) trusted provider, checker, aggregator and downloader. Includes an uploader command line tool for the trusted provider.
Status: Beta (ready for more testing, but known short comings, see issues) Status: Beta (ready for more testing, but known shortcomings see issues)
## [csaf_provider](docs/csaf_provider.md) ## [csaf_provider](docs/csaf_provider.md)
@ -18,6 +18,9 @@ is an implementation of the role CSAF Aggregator.
## [csaf_checker](docs/csaf_checker.md) ## [csaf_checker](docs/csaf_checker.md)
is a tool for testing a CSAF Trusted Provider according to [Section 7 of the CSAF standard](https://docs.oasis-open.org/csaf/csaf/v2.0/csaf-v2.0.html#7-distributing-csaf-documents). is a tool for testing a CSAF Trusted Provider according to [Section 7 of the CSAF standard](https://docs.oasis-open.org/csaf/csaf/v2.0/csaf-v2.0.html#7-distributing-csaf-documents).
## [csaf_downloader](docs/csaf_downloader.md)
is a tool for downloading advisories from a provider.
## Setup ## Setup
Note that the server side is only tested Note that the server side is only tested
and the binaries available for GNU/Linux-Systems, e.g. Ubuntu LTS. and the binaries available for GNU/Linux-Systems, e.g. Ubuntu LTS.

View file

@ -67,6 +67,9 @@ type config struct {
// for interim advisories. Less/equal zero means forever. // for interim advisories. Less/equal zero means forever.
InterimYears int `toml:"interim_years"` InterimYears int `toml:"interim_years"`
// RemoteValidator configures an optional remote validation.
RemoteValidatorOptions *csaf.RemoteValidatorOptions `toml:"remote_validator"`
keyMu sync.Mutex keyMu sync.Mutex
key *crypto.Key key *crypto.Key
keyErr error keyErr error

View file

@ -36,7 +36,7 @@ func (w *worker) setupProviderFull(provider *provider) error {
w.provider = provider w.provider = provider
// Each job needs a separate client. // Each job needs a separate client.
w.client = w.cfg.httpClient(provider) w.client = w.processor.cfg.httpClient(provider)
// We need the provider metadata in all cases. // We need the provider metadata in all cases.
if err := w.locateProviderMetadata(provider.Domain); err != nil { if err := w.locateProviderMetadata(provider.Domain); err != nil {
@ -83,6 +83,22 @@ func (p *processor) full() error {
var doWork fullWorkFunc var doWork fullWorkFunc
if p.cfg.runAsMirror() { if p.cfg.runAsMirror() {
// check if we need to setup a remote validator
if p.cfg.RemoteValidatorOptions != nil {
validator, err := p.cfg.RemoteValidatorOptions.Open()
if err != nil {
return err
}
// Not sure if we really need it to be serialized.
p.remoteValidator = csaf.SynchronizedRemoteValidator(validator)
defer func() {
p.remoteValidator.Close()
p.remoteValidator = nil
}()
}
doWork = (*worker).mirror doWork = (*worker).mirror
log.Println("Running in aggregator mode") log.Println("Running in aggregator mode")
} else { } else {
@ -96,7 +112,7 @@ func (p *processor) full() error {
log.Printf("Starting %d workers.\n", p.cfg.Workers) log.Printf("Starting %d workers.\n", p.cfg.Workers)
for i := 1; i <= p.cfg.Workers; i++ { for i := 1; i <= p.cfg.Workers; i++ {
wg.Add(1) wg.Add(1)
w := newWorker(i, p.cfg) w := newWorker(i, p)
go w.fullWork(&wg, doWork, queue) go w.fullWork(&wg, doWork, queue)
} }

View file

@ -143,7 +143,7 @@ func (w *worker) writeROLIE(label string, summaries []summary) error {
fname := "csaf-feed-tlp-" + labelFolder + ".json" fname := "csaf-feed-tlp-" + labelFolder + ".json"
feedURL := w.cfg.Domain + "/.well-known/csaf-aggregator/" + feedURL := w.processor.cfg.Domain + "/.well-known/csaf-aggregator/" +
w.provider.Name + "/" + labelFolder + "/" + fname w.provider.Name + "/" + labelFolder + "/" + fname
entries := make([]*csaf.Entry, len(summaries)) entries := make([]*csaf.Entry, len(summaries))
@ -156,7 +156,7 @@ func (w *worker) writeROLIE(label string, summaries []summary) error {
for i := range summaries { for i := range summaries {
s := &summaries[i] s := &summaries[i]
csafURL := w.cfg.Domain + "/.well-known/csaf-aggregator/" + csafURL := w.processor.cfg.Domain + "/.well-known/csaf-aggregator/" +
w.provider.Name + "/" + label + "/" + w.provider.Name + "/" + label + "/" +
strconv.Itoa(s.summary.InitialReleaseDate.Year()) + "/" + strconv.Itoa(s.summary.InitialReleaseDate.Year()) + "/" +
s.filename s.filename
@ -233,26 +233,3 @@ func (w *worker) writeIndices() error {
return nil return nil
} }
// loadIndex loads baseURL/index.txt and returns a list of files
// prefixed by baseURL/.
func (w *worker) loadIndex(baseURL string) ([]string, error) {
	indexURL := baseURL + "/index.txt"
	resp, err := w.client.Get(indexURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// NOTE(review): the HTTP status code is not checked here, so an
	// error page served with e.g. 404 would be scanned as if it were
	// a valid index — consider failing on non-200 responses.
	var lines []string
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		// Each line of index.txt is a path relative to baseURL.
		lines = append(lines, baseURL+"/"+scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return lines, nil
}

View file

@ -149,12 +149,12 @@ func (w *worker) setupProviderInterim(provider *provider) {
w.provider = provider w.provider = provider
// Each job needs a separate client. // Each job needs a separate client.
w.client = w.cfg.httpClient(provider) w.client = w.processor.cfg.httpClient(provider)
} }
func (w *worker) interimWork(wg *sync.WaitGroup, jobs <-chan *interimJob) { func (w *worker) interimWork(wg *sync.WaitGroup, jobs <-chan *interimJob) {
defer wg.Done() defer wg.Done()
path := filepath.Join(w.cfg.Web, ".well-known", "csaf-aggregator") path := filepath.Join(w.processor.cfg.Web, ".well-known", "csaf-aggregator")
for j := range jobs { for j := range jobs {
w.setupProviderInterim(j.provider) w.setupProviderInterim(j.provider)
@ -162,7 +162,7 @@ func (w *worker) interimWork(wg *sync.WaitGroup, jobs <-chan *interimJob) {
providerPath := filepath.Join(path, j.provider.Name) providerPath := filepath.Join(path, j.provider.Name)
j.err = func() error { j.err = func() error {
tx := newLazyTransaction(providerPath, w.cfg.Folder) tx := newLazyTransaction(providerPath, w.processor.cfg.Folder)
defer tx.rollback() defer tx.rollback()
// Try all the labels // Try all the labels
@ -178,7 +178,7 @@ func (w *worker) interimWork(wg *sync.WaitGroup, jobs <-chan *interimJob) {
interimsCSV := filepath.Join(labelPath, "interims.csv") interimsCSV := filepath.Join(labelPath, "interims.csv")
interims, err := readInterims( interims, err := readInterims(
interimsCSV, w.cfg.InterimYears) interimsCSV, w.processor.cfg.InterimYears)
if err != nil { if err != nil {
return err return err
} }
@ -240,7 +240,7 @@ func (p *processor) interim() error {
log.Printf("Starting %d workers.\n", p.cfg.Workers) log.Printf("Starting %d workers.\n", p.cfg.Workers)
for i := 1; i <= p.cfg.Workers; i++ { for i := 1; i <= p.cfg.Workers; i++ {
wg.Add(1) wg.Add(1)
w := newWorker(i, p.cfg) w := newWorker(i, p)
go w.interimWork(&wg, queue) go w.interimWork(&wg, queue)
} }

View file

@ -26,7 +26,7 @@ type options struct {
func errCheck(err error) { func errCheck(err error) {
if err != nil { if err != nil {
if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp { if flags.WroteHelp(err) {
os.Exit(0) os.Exit(0)
} }
log.Fatalf("error: %v\n", err) log.Fatalf("error: %v\n", err)

View file

@ -29,76 +29,11 @@ import (
"github.com/ProtonMail/gopenpgp/v2/armor" "github.com/ProtonMail/gopenpgp/v2/armor"
"github.com/ProtonMail/gopenpgp/v2/constants" "github.com/ProtonMail/gopenpgp/v2/constants"
"github.com/ProtonMail/gopenpgp/v2/crypto" "github.com/ProtonMail/gopenpgp/v2/crypto"
"github.com/csaf-poc/csaf_distribution/csaf" "github.com/csaf-poc/csaf_distribution/csaf"
"github.com/csaf-poc/csaf_distribution/util" "github.com/csaf-poc/csaf_distribution/util"
) )
// handleROLIE extracts the ROLIE feeds from the given rolie document
// (the evaluated $.distributions[*].rolie.feeds value), downloads each
// feed and passes the resolved advisory URLs together with the feed's
// TLP label to the process callback. Per-feed problems are logged and
// the feed is skipped; only errors from process abort the whole run.
func (w *worker) handleROLIE(
	rolie interface{},
	process func(*csaf.TLPLabel, []string) error,
) error {
	base, err := url.Parse(w.loc)
	if err != nil {
		return err
	}
	var feeds [][]csaf.Feed
	if err := util.ReMarshalJSON(&feeds, rolie); err != nil {
		return err
	}
	log.Printf("Found %d ROLIE feed(s).\n", len(feeds))

	for _, fs := range feeds {
		for i := range fs {
			feed := &fs[i]
			if feed.URL == nil {
				continue
			}
			// Feed URLs may be relative to the provider metadata location.
			up, err := url.Parse(string(*feed.URL))
			if err != nil {
				log.Printf("Invalid URL %s in feed: %v.", *feed.URL, err)
				continue
			}
			feedURL := base.ResolveReference(up).String()
			log.Printf("Feed URL: %s\n", feedURL)

			fb, err := util.BaseURL(feedURL)
			if err != nil {
				log.Printf("error: Invalid feed base URL '%s': %v\n", fb, err)
				continue
			}
			feedBaseURL, err := url.Parse(fb)
			if err != nil {
				log.Printf("error: Cannot parse feed base URL '%s': %v\n", fb, err)
				continue
			}

			res, err := w.client.Get(feedURL)
			if err != nil {
				log.Printf("error: Cannot get feed '%s'\n", err)
				continue
			}
			if res.StatusCode != http.StatusOK {
				// Fix: close the body on this branch, too. The closure
				// below, which closes it on the happy path, is never
				// reached here, so the body used to leak.
				res.Body.Close()
				log.Printf("error: Fetching %s failed. Status code %d (%s)",
					feedURL, res.StatusCode, res.Status)
				continue
			}
			rfeed, err := func() (*csaf.ROLIEFeed, error) {
				defer res.Body.Close()
				return csaf.LoadROLIEFeed(res.Body)
			}()
			if err != nil {
				log.Printf("Loading ROLIE feed failed: %v.", err)
				continue
			}
			// Resolve the advisory links against the feed's base URL.
			files := resolveURLs(rfeed.Files("self"), feedBaseURL)
			if err := process(feed.TLPLabel, files); err != nil {
				return err
			}
		}
	}
	return nil
}
// mirrorAllowed checks if mirroring is allowed. // mirrorAllowed checks if mirroring is allowed.
func (w *worker) mirrorAllowed() bool { func (w *worker) mirrorAllowed() bool {
var b bool var b bool
@ -129,38 +64,20 @@ func (w *worker) mirrorInternal() (*csaf.AggregatorCSAFProvider, error) {
// Collecting the summaries of the advisories. // Collecting the summaries of the advisories.
w.summaries = make(map[string][]summary) w.summaries = make(map[string][]summary)
// Check if we have ROLIE feeds. base, err := url.Parse(w.loc)
rolie, err := w.expr.Eval(
"$.distributions[*].rolie.feeds", w.metadataProvider)
if err != nil { if err != nil {
log.Printf("rolie check failed: %v\n", err)
return nil, err return nil, err
} }
fs, hasRolie := rolie.([]interface{}) afp := csaf.NewAdvisoryFileProcessor(
hasRolie = hasRolie && len(fs) > 0 w.client,
w.expr,
w.metadataProvider,
base)
if hasRolie { if err := afp.Process(w.mirrorFiles); err != nil {
if err := w.handleROLIE(rolie, w.mirrorFiles); err != nil {
return nil, err return nil, err
} }
} else {
// No rolie feeds -> try to load files from index.txt
baseURL, err := util.BaseURL(w.loc)
if err != nil {
return nil, err
}
files, err := w.loadIndex(baseURL)
if err != nil {
return nil, err
}
_ = files
// XXX: Is treating as white okay? better look into the advisories?
white := csaf.TLPLabel(csaf.TLPLabelWhite)
if err := w.mirrorFiles(&white, files); err != nil {
return nil, err
}
} // TODO: else scan directories?
if err := w.writeIndices(); err != nil { if err := w.writeIndices(); err != nil {
return nil, err return nil, err
@ -183,7 +100,7 @@ func (w *worker) mirrorInternal() (*csaf.AggregatorCSAFProvider, error) {
// Add us as a mirror. // Add us as a mirror.
mirrorURL := csaf.ProviderURL( mirrorURL := csaf.ProviderURL(
fmt.Sprintf("%s/.well-known/csaf-aggregator/%s/provider-metadata.json", fmt.Sprintf("%s/.well-known/csaf-aggregator/%s/provider-metadata.json",
w.cfg.Domain, w.provider.Name)) w.processor.cfg.Domain, w.provider.Name))
acp.Mirrors = []csaf.ProviderURL{ acp.Mirrors = []csaf.ProviderURL{
mirrorURL, mirrorURL,
@ -207,7 +124,7 @@ func (w *worker) writeProviderMetadata() error {
fname := filepath.Join(w.dir, "provider-metadata.json") fname := filepath.Join(w.dir, "provider-metadata.json")
pm := csaf.NewProviderMetadataPrefix( pm := csaf.NewProviderMetadataPrefix(
w.cfg.Domain+"/.well-known/csaf-aggregator/"+w.provider.Name, w.processor.cfg.Domain+"/.well-known/csaf-aggregator/"+w.provider.Name,
w.labelsFromSummaries()) w.labelsFromSummaries())
// Figure out the role // Figure out the role
@ -255,7 +172,7 @@ func (w *worker) mirrorPGPKeys(pm *csaf.ProviderMetadata) error {
localKeyURL := func(fingerprint string) string { localKeyURL := func(fingerprint string) string {
return fmt.Sprintf("%s/.well-known/csaf-aggregator/%s/openpgp/%s.asc", return fmt.Sprintf("%s/.well-known/csaf-aggregator/%s/openpgp/%s.asc",
w.cfg.Domain, w.provider.Name, fingerprint) w.processor.cfg.Domain, w.provider.Name, fingerprint)
} }
for i := range pm.PGPKeys { for i := range pm.PGPKeys {
@ -311,12 +228,12 @@ func (w *worker) mirrorPGPKeys(pm *csaf.ProviderMetadata) error {
// If we have public key configured copy it into the new folder // If we have public key configured copy it into the new folder
if w.cfg.OpenPGPPublicKey == "" { if w.processor.cfg.OpenPGPPublicKey == "" {
return nil return nil
} }
// Load the key for the fingerprint. // Load the key for the fingerprint.
data, err := os.ReadFile(w.cfg.OpenPGPPublicKey) data, err := os.ReadFile(w.processor.cfg.OpenPGPPublicKey)
if err != nil { if err != nil {
os.RemoveAll(openPGPFolder) os.RemoveAll(openPGPFolder)
return err return err
@ -390,7 +307,7 @@ func (w *worker) createAggregatorProvider() (*csaf.AggregatorCSAFProvider, error
func (w *worker) doMirrorTransaction() error { func (w *worker) doMirrorTransaction() error {
webTarget := filepath.Join( webTarget := filepath.Join(
w.cfg.Web, ".well-known", "csaf-aggregator", w.provider.Name) w.processor.cfg.Web, ".well-known", "csaf-aggregator", w.provider.Name)
var oldWeb string var oldWeb string
@ -408,7 +325,7 @@ func (w *worker) doMirrorTransaction() error {
} }
// Check if there is a sysmlink already. // Check if there is a sysmlink already.
target := filepath.Join(w.cfg.Folder, w.provider.Name) target := filepath.Join(w.processor.cfg.Folder, w.provider.Name)
log.Printf("target: '%s'\n", target) log.Printf("target: '%s'\n", target)
exists, err := util.PathExists(target) exists, err := util.PathExists(target)
@ -472,14 +389,14 @@ func (w *worker) downloadSignature(path string) (string, error) {
// sign signs the given data with the configured key. // sign signs the given data with the configured key.
func (w *worker) sign(data []byte) (string, error) { func (w *worker) sign(data []byte) (string, error) {
if w.signRing == nil { if w.signRing == nil {
key, err := w.cfg.privateOpenPGPKey() key, err := w.processor.cfg.privateOpenPGPKey()
if err != nil { if err != nil {
return "", err return "", err
} }
if key == nil { if key == nil {
return "", nil return "", nil
} }
if pp := w.cfg.Passphrase; pp != nil { if pp := w.processor.cfg.Passphrase; pp != nil {
if key, err = key.Unlock([]byte(*pp)); err != nil { if key, err = key.Unlock([]byte(*pp)); err != nil {
return "", err return "", err
} }
@ -496,11 +413,8 @@ func (w *worker) sign(data []byte) (string, error) {
sig.Data, constants.PGPSignatureHeader, "", "") sig.Data, constants.PGPSignatureHeader, "", "")
} }
func (w *worker) mirrorFiles(tlpLabel *csaf.TLPLabel, files []string) error { func (w *worker) mirrorFiles(tlpLabel csaf.TLPLabel, files []csaf.AdvisoryFile) error {
label := "unknown" label := strings.ToLower(string(tlpLabel))
if tlpLabel != nil {
label = strings.ToLower(string(*tlpLabel))
}
summaries := w.summaries[label] summaries := w.summaries[label]
@ -514,7 +428,7 @@ func (w *worker) mirrorFiles(tlpLabel *csaf.TLPLabel, files []string) error {
yearDirs := make(map[int]string) yearDirs := make(map[int]string)
for _, file := range files { for _, file := range files {
u, err := url.Parse(file) u, err := url.Parse(file.URL())
if err != nil { if err != nil {
log.Printf("error: %s\n", err) log.Printf("error: %s\n", err)
continue continue
@ -539,22 +453,37 @@ func (w *worker) mirrorFiles(tlpLabel *csaf.TLPLabel, files []string) error {
return json.NewDecoder(tee).Decode(&advisory) return json.NewDecoder(tee).Decode(&advisory)
} }
if err := downloadJSON(w.client, file, download); err != nil { if err := downloadJSON(w.client, file.URL(), download); err != nil {
log.Printf("error: %v\n", err) log.Printf("error: %v\n", err)
continue continue
} }
// Check against CSAF schema.
errors, err := csaf.ValidateCSAF(advisory) errors, err := csaf.ValidateCSAF(advisory)
if err != nil { if err != nil {
log.Printf("error: %s: %v", file, err) log.Printf("error: %s: %v", file, err)
continue continue
} }
if len(errors) > 0 { if len(errors) > 0 {
log.Printf("CSAF file %s has %d validation errors.", log.Printf("CSAF file %s has %d validation errors.\n",
file, len(errors)) file, len(errors))
continue continue
} }
// Check against remote validator.
if rmv := w.processor.remoteValidator; rmv != nil {
valid, err := rmv.Validate(advisory)
if err != nil {
log.Printf("Calling remote validator failed: %s\n", err)
continue
}
if !valid {
log.Printf(
"CSAF file %s does not validate remotely.\n", file)
continue
}
}
sum, err := csaf.NewAdvisorySummary(w.expr, advisory) sum, err := csaf.NewAdvisorySummary(w.expr, advisory)
if err != nil { if err != nil {
log.Printf("error: %s: %v\n", file, err) log.Printf("error: %s: %v\n", file, err)
@ -563,7 +492,7 @@ func (w *worker) mirrorFiles(tlpLabel *csaf.TLPLabel, files []string) error {
summaries = append(summaries, summary{ summaries = append(summaries, summary{
filename: filename, filename: filename,
summary: sum, summary: sum,
url: file, url: file.URL(),
}) })
year := sum.InitialReleaseDate.Year() year := sum.InitialReleaseDate.Year()
@ -589,7 +518,7 @@ func (w *worker) mirrorFiles(tlpLabel *csaf.TLPLabel, files []string) error {
} }
// Try to fetch signature file. // Try to fetch signature file.
sigURL := file + ".asc" sigURL := file.SignURL()
ascFile := fname + ".asc" ascFile := fname + ".asc"
if err := w.downloadSignatureOrSign(sigURL, ascFile, data); err != nil { if err := w.downloadSignatureOrSign(sigURL, ascFile, data); err != nil {
return err return err

View file

@ -20,7 +20,11 @@ import (
) )
type processor struct { type processor struct {
// cfg is the global configuration.
cfg *config cfg *config
// remoteValidator is a globally configured remote validator.
remoteValidator csaf.RemoteValidator
} }
type summary struct { type summary struct {
@ -31,8 +35,9 @@ type summary struct {
type worker struct { type worker struct {
num int num int
processor *processor
expr *util.PathEval expr *util.PathEval
cfg *config
signRing *crypto.KeyRing signRing *crypto.KeyRing
client util.Client // client per provider client util.Client // client per provider
@ -43,10 +48,10 @@ type worker struct {
summaries map[string][]summary // the summaries of the advisories. summaries map[string][]summary // the summaries of the advisories.
} }
func newWorker(num int, config *config) *worker { func newWorker(num int, processor *processor) *worker {
return &worker{ return &worker{
num: num, num: num,
cfg: config, processor: processor,
expr: util.NewPathEval(), expr: util.NewPathEval(),
} }
} }
@ -64,7 +69,7 @@ func (w *worker) createDir() (string, error) {
return w.dir, nil return w.dir, nil
} }
dir, err := util.MakeUniqDir( dir, err := util.MakeUniqDir(
filepath.Join(w.cfg.Folder, w.provider.Name)) filepath.Join(w.processor.cfg.Folder, w.provider.Name))
if err == nil { if err == nil {
w.dir = dir w.dir = dir
} }

View file

@ -1,28 +0,0 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"log"
"net/url"
)
// resolveURLs resolves a list of URLs urls against a base URL base.
func resolveURLs(urls []string, base *url.URL) []string {
out := make([]string, 0, len(urls))
for _, u := range urls {
p, err := url.Parse(u)
if err != nil {
log.Printf("error: Invalid URL '%s': %v\n", u, err)
continue
}
out = append(out, base.ResolveReference(p).String())
}
return out
}

View file

@ -26,7 +26,12 @@ type (
) )
func (pgs pages) listed(path string, pro *processor) (bool, error) { func (pgs pages) listed(path string, pro *processor) (bool, error) {
base, err := util.BaseURL(path) pathURL, err := url.Parse(path)
if err != nil {
return false, err
}
base, err := util.BaseURL(pathURL)
if err != nil { if err != nil {
return false, err return false, err
} }

View file

@ -38,7 +38,7 @@ type options struct {
func errCheck(err error) { func errCheck(err error) {
if err != nil { if err != nil {
if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp { if flags.WroteHelp(err) {
os.Exit(0) os.Exit(0)
} }
log.Fatalf("error: %v\n", err) log.Fatalf("error: %v\n", err)

View file

@ -329,44 +329,8 @@ func (p *processor) httpClient() util.Client {
var yearFromURL = regexp.MustCompile(`.*/(\d{4})/[^/]+$`) var yearFromURL = regexp.MustCompile(`.*/(\d{4})/[^/]+$`)
// checkFile constructs the urls of a remote file.
type checkFile interface {
url() string
sha256() string
sha512() string
sign() string
}
// stringFile is a simple implementation of checkFile.
// The hash and signature files are directly constructed by extending
// the file name.
type stringFile string
func (sf stringFile) url() string { return string(sf) }
func (sf stringFile) sha256() string { return string(sf) + ".sha256" }
func (sf stringFile) sha512() string { return string(sf) + ".sha512" }
func (sf stringFile) sign() string { return string(sf) + ".asc" }
// hashFile is a more involed version of checkFile.
// Here each component can be given explicitly.
// If a component is not given it is constructed by
// extending the first component.
type hashFile [4]string
func (hf hashFile) name(i int, ext string) string {
if hf[i] != "" {
return hf[i]
}
return hf[0] + ext
}
func (hf hashFile) url() string { return hf[0] }
func (hf hashFile) sha256() string { return hf.name(1, ".sha256") }
func (hf hashFile) sha512() string { return hf.name(2, ".sha512") }
func (hf hashFile) sign() string { return hf.name(3, ".asc") }
func (p *processor) integrity( func (p *processor) integrity(
files []checkFile, files []csaf.AdvisoryFile,
base string, base string,
mask whereType, mask whereType,
lg func(MessageType, string, ...interface{}), lg func(MessageType, string, ...interface{}),
@ -380,7 +344,7 @@ func (p *processor) integrity(
var data bytes.Buffer var data bytes.Buffer
for _, f := range files { for _, f := range files {
fp, err := url.Parse(f.url()) fp, err := url.Parse(f.URL())
if err != nil { if err != nil {
lg(ErrorType, "Bad URL %s: %v", f, err) lg(ErrorType, "Bad URL %s: %v", f, err)
continue continue
@ -452,8 +416,8 @@ func (p *processor) integrity(
url func() string url func() string
hash []byte hash []byte
}{ }{
{"SHA256", f.sha256, s256.Sum(nil)}, {"SHA256", f.SHA256URL, s256.Sum(nil)},
{"SHA512", f.sha512, s512.Sum(nil)}, {"SHA512", f.SHA512URL, s512.Sum(nil)},
} { } {
hu, err := url.Parse(x.url()) hu, err := url.Parse(x.url())
if err != nil { if err != nil {
@ -490,9 +454,9 @@ func (p *processor) integrity(
} }
// Check signature // Check signature
su, err := url.Parse(f.sign()) su, err := url.Parse(f.SignURL())
if err != nil { if err != nil {
lg(ErrorType, "Bad URL %s: %v", f.sign(), err) lg(ErrorType, "Bad URL %s: %v", f.SignURL(), err)
continue continue
} }
sigFile := b.ResolveReference(su).String() sigFile := b.ResolveReference(su).String()
@ -585,14 +549,20 @@ func (p *processor) processROLIEFeed(feed string) error {
} }
} }
base, err := util.BaseURL(feed) feedURL, err := url.Parse(feed)
if err != nil {
p.badProviderMetadata.error("Bad base path: %v", err)
return errContinue
}
base, err := util.BaseURL(feedURL)
if err != nil { if err != nil {
p.badProviderMetadata.error("Bad base path: %v", err) p.badProviderMetadata.error("Bad base path: %v", err)
return errContinue return errContinue
} }
// Extract the CSAF files from feed. // Extract the CSAF files from feed.
var files []checkFile var files []csaf.AdvisoryFile
rfeed.Entries(func(entry *csaf.Entry) { rfeed.Entries(func(entry *csaf.Entry) {
@ -636,12 +606,12 @@ func (p *processor) processROLIEFeed(feed string) error {
return return
} }
var file checkFile var file csaf.AdvisoryFile
if sha256 != "" || sha512 != "" || sign != "" { if sha256 != "" || sha512 != "" || sign != "" {
file = hashFile{url, sha256, sha512, sign} file = csaf.HashedAdvisoryFile{url, sha256, sha512, sign}
} else { } else {
file = stringFile(url) file = csaf.PlainAdvisoryFile(url)
} }
files = append(files, file) files = append(files, file)
@ -688,12 +658,12 @@ func (p *processor) checkIndex(base string, mask whereType) error {
return errContinue return errContinue
} }
files, err := func() ([]checkFile, error) { files, err := func() ([]csaf.AdvisoryFile, error) {
defer res.Body.Close() defer res.Body.Close()
var files []checkFile var files []csaf.AdvisoryFile
scanner := bufio.NewScanner(res.Body) scanner := bufio.NewScanner(res.Body)
for scanner.Scan() { for scanner.Scan() {
files = append(files, stringFile(scanner.Text())) files = append(files, csaf.PlainAdvisoryFile(scanner.Text()))
} }
return files, scanner.Err() return files, scanner.Err()
}() }()
@ -730,10 +700,10 @@ func (p *processor) checkChanges(base string, mask whereType) error {
return errContinue return errContinue
} }
times, files, err := func() ([]time.Time, []checkFile, error) { times, files, err := func() ([]time.Time, []csaf.AdvisoryFile, error) {
defer res.Body.Close() defer res.Body.Close()
var times []time.Time var times []time.Time
var files []checkFile var files []csaf.AdvisoryFile
c := csv.NewReader(res.Body) c := csv.NewReader(res.Body)
for { for {
r, err := c.Read() r, err := c.Read()
@ -750,7 +720,7 @@ func (p *processor) checkChanges(base string, mask whereType) error {
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
times, files = append(times, t), append(files, stringFile(r[1])) times, files = append(times, t), append(files, csaf.PlainAdvisoryFile(r[1]))
} }
return times, files, nil return times, files, nil
}() }()
@ -817,7 +787,11 @@ func (p *processor) checkCSAFs(domain string) error {
} }
// No rolie feeds // No rolie feeds
base, err := util.BaseURL(p.pmdURL) pmdURL, err := url.Parse(p.pmdURL)
if err != nil {
return err
}
base, err := util.BaseURL(pmdURL)
if err != nil { if err != nil {
return err return err
} }

View file

@ -113,7 +113,7 @@ func (r *securityReporter) report(p *processor, domain *Domain) {
return return
} }
if len(p.badSecurity) == 0 { if len(p.badSecurity) == 0 {
req.message(InfoType, "Found good security.txt.") req.message(InfoType, "Found CSAF entry in security.txt.")
return return
} }
req.Messages = p.badSecurity req.Messages = p.badSecurity

View file

@ -0,0 +1,502 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"bytes"
"crypto/sha256"
"crypto/sha512"
"crypto/tls"
"encoding/json"
"fmt"
"hash"
"io"
"log"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/ProtonMail/gopenpgp/v2/crypto"
"github.com/csaf-poc/csaf_distribution/csaf"
"github.com/csaf-poc/csaf_distribution/util"
"golang.org/x/time/rate"
)
// downloader holds the state of one download run.
type downloader struct {
	// client is the lazily created HTTP client; see httpClient.
	client util.Client
	// opts are the parsed command line options.
	opts *options
	// directory is the download target directory
	// (not assigned in this chunk — confirm usage in downloadFiles).
	directory string
	// keys are the provider's public OpenPGP key rings,
	// filled by loadOpenPGPKeys.
	keys []*crypto.KeyRing
	// eval is a JSONPath evaluator reused across documents.
	eval *util.PathEval
}
// newDownloader creates a downloader configured by the given options.
// The HTTP client is created lazily on first use.
func newDownloader(opts *options) *downloader {
	d := downloader{
		opts: opts,
		eval: util.NewPathEval(),
	}
	return &d
}
// httpClient returns the HTTP client to use for all requests, creating
// and caching it on first call. Depending on the options the client
// skips TLS certificate verification (Insecure), logs every request
// (Verbose) and/or rate limits requests (Rate).
func (d *downloader) httpClient() util.Client {
	if d.client != nil {
		return d.client
	}

	hClient := http.Client{}
	if d.opts.Insecure {
		hClient.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}

	var client util.Client = &hClient
	if d.opts.Verbose {
		client = &util.LoggingClient{Client: &hClient}
	}

	if d.opts.Rate != nil {
		client = &util.LimitingClient{
			Client:  client,
			Limiter: rate.NewLimiter(rate.Limit(*d.opts.Rate), 1),
		}
	}

	d.client = client
	return d.client
}
// loadProviderMetadataDirectly loads a provider-metadata.json directly
// from the given URL. Problems are logged and nil is returned if the
// document cannot be fetched, decoded or does not validate against the
// provider metadata schema.
func (d *downloader) loadProviderMetadataDirectly(path string) *csaf.LoadedProviderMetadata {

	client := d.httpClient()

	resp, err := client.Get(path)
	if err != nil {
		log.Printf("Error fetching '%s': %v\n", path, err)
		return nil
	}
	// Fix: close the body in every case. Previously the defer sat
	// after the status-code check, leaking the body on non-200 answers.
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		log.Printf(
			"Error fetching '%s': %s (%d)\n", path, resp.Status, resp.StatusCode)
		return nil
	}

	var doc interface{}

	if err := json.NewDecoder(resp.Body).Decode(&doc); err != nil {
		log.Printf("Decoding '%s' as JSON failed: %v\n", path, err)
		return nil
	}

	errors, err := csaf.ValidateProviderMetadata(doc)
	if err != nil {
		log.Printf("Schema validation of '%s' failed: %v\n", path, err)
		return nil
	}
	if len(errors) > 0 {
		log.Printf(
			"Schema validation of '%s' leads to %d issues.\n", path, len(errors))
		return nil
	}

	return &csaf.LoadedProviderMetadata{
		Document: doc,
		URL:      path,
	}
}
// download fetches the advisories of the given domain.
// If domain starts with "https://" it is treated as the direct URL of
// a provider-metadata.json; otherwise the provider metadata is located
// for the domain via csaf.LoadProviderMetadataForDomain.
func (d *downloader) download(domain string) error {
	var lpmd *csaf.LoadedProviderMetadata
	if strings.HasPrefix(domain, "https://") {
		// Direct URL to the provider metadata document.
		lpmd = d.loadProviderMetadataDirectly(domain)
	} else {
		lpmd = csaf.LoadProviderMetadataForDomain(
			d.httpClient(), domain, func(format string, args ...interface{}) {
				log.Printf(
					"Looking for provider-metadata.json of '"+domain+"': "+format+"\n", args...)
			})
	}

	if lpmd == nil {
		return fmt.Errorf("no provider-metadata.json found for '%s'", domain)
	}

	// The URL the metadata was loaded from serves as the base for
	// resolving relative references inside the document.
	base, err := url.Parse(lpmd.URL)
	if err != nil {
		return fmt.Errorf("invalid URL '%s': %v", lpmd.URL, err)
	}

	if err := d.loadOpenPGPKeys(
		d.httpClient(),
		lpmd.Document,
		base,
	); err != nil {
		return err
	}

	// Process all advisory files advertised by the provider.
	afp := csaf.NewAdvisoryFileProcessor(
		d.httpClient(),
		d.eval,
		lpmd.Document,
		base)
	return afp.Process(d.downloadFiles)
}
// loadOpenPGPKeys loads the OpenPGP keys advertised in the provider
// metadata document, verifies their fingerprints and stores usable
// key rings in d.keys for later signature checks.
// Individual keys that cannot be fetched or verified are logged and
// skipped; only a re-marshalling error aborts.
func (d *downloader) loadOpenPGPKeys(
	client util.Client,
	doc interface{},
	base *url.URL,
) error {
	src, err := d.eval.Eval("$.public_openpgp_keys", doc)
	if err != nil {
		// no keys.
		return nil
	}
	var keys []csaf.PGPKey
	if err := util.ReMarshalJSON(&keys, src); err != nil {
		return err
	}
	if len(keys) == 0 {
		return nil
	}
	// Try to load
	for i := range keys {
		key := &keys[i]
		if key.URL == nil {
			continue
		}
		up, err := url.Parse(*key.URL)
		if err != nil {
			log.Printf("Invalid URL '%s': %v", *key.URL, err)
			continue
		}
		u := base.ResolveReference(up).String()
		res, err := client.Get(u)
		if err != nil {
			log.Printf("Fetching public OpenPGP key %s failed: %v.", u, err)
			continue
		}
		if res.StatusCode != http.StatusOK {
			// Close the body also on the error path to avoid
			// leaking the connection.
			res.Body.Close()
			log.Printf("Fetching public OpenPGP key %s status code: %d (%s)",
				u, res.StatusCode, res.Status)
			continue
		}
		ckey, err := func() (*crypto.Key, error) {
			defer res.Body.Close()
			return crypto.NewKeyFromArmoredReader(res.Body)
		}()
		if err != nil {
			log.Printf("Reading public OpenPGP key %s failed: %v", u, err)
			continue
		}
		// The advertised fingerprint must match the downloaded key.
		if !strings.EqualFold(ckey.GetFingerprint(), string(key.Fingerprint)) {
			log.Printf(
				"Fingerprint of public OpenPGP key %s does not match remotely loaded.", u)
			continue
		}
		keyring, err := crypto.NewKeyRing(ckey)
		if err != nil {
			log.Printf("Creating store for public OpenPGP key %s failed: %v.", u, err)
			continue
		}
		d.keys = append(d.keys, keyring)
	}
	return nil
}
// downloadFiles downloads the advisories in files together with their
// hash and signature companion files, verifies them (checksums,
// OpenPGP signature, CSAF schema) and writes them below d.directory
// under <tlp-label>/<year-of-initial-release>/.
// Advisories failing a check are logged and skipped; only local
// filesystem errors abort the whole run.
func (d *downloader) downloadFiles(label csaf.TLPLabel, files []csaf.AdvisoryFile) error {
	client := d.httpClient()
	// data buffers the raw advisory bytes so they can be written
	// to disk after all checks have passed.
	var data bytes.Buffer
	var lastDir string
	lower := strings.ToLower(string(label))
	var initialReleaseDate time.Time
	dateExtract := util.TimeMatcher(&initialReleaseDate, time.RFC3339)
	for _, file := range files {
		u, err := url.Parse(file.URL())
		if err != nil {
			log.Printf("Ignoring invalid URL: %s: %v\n", file.URL(), err)
			continue
		}
		// Ignore not confirming filenames.
		filename := filepath.Base(u.Path)
		if !util.ConfirmingFileName(filename) {
			log.Printf("Not confirming filename %q. Ignoring.\n", filename)
			continue
		}
		resp, err := client.Get(file.URL())
		if err != nil {
			log.Printf("WARN: cannot get '%s': %v\n", file.URL(), err)
			continue
		}
		if resp.StatusCode != http.StatusOK {
			log.Printf("WARN: cannot load %s: %s (%d)\n",
				file.URL(), resp.Status, resp.StatusCode)
			continue
		}
		var (
			writers []io.Writer
			s256, s512 hash.Hash
			s256Data, s512Data []byte
			remoteSHA256, remoteSHA512 []byte
			signData []byte
		)
		// Only hash when we have a remote counter part we can compare it with.
		if remoteSHA256, s256Data, err = d.loadHash(file.SHA256URL()); err != nil {
			if d.opts.Verbose {
				log.Printf("WARN: cannot fetch %s: %v\n", file.SHA256URL(), err)
			}
		} else {
			s256 = sha256.New()
			writers = append(writers, s256)
		}
		if remoteSHA512, s512Data, err = d.loadHash(file.SHA512URL()); err != nil {
			if d.opts.Verbose {
				log.Printf("WARN: cannot fetch %s: %v\n", file.SHA512URL(), err)
			}
		} else {
			s512 = sha512.New()
			writers = append(writers, s512)
		}
		// Remember the data as we need to store it to file later.
		data.Reset()
		writers = append(writers, &data)
		// Download the advisory and hash it.
		// The TeeReader feeds every byte read by the JSON decoder
		// into the hashers and the data buffer simultaneously.
		hasher := io.MultiWriter(writers...)
		var doc interface{}
		if err := func() error {
			defer resp.Body.Close()
			tee := io.TeeReader(resp.Body, hasher)
			return json.NewDecoder(tee).Decode(&doc)
		}(); err != nil {
			log.Printf("Downloading %s failed: %v", file.URL(), err)
			continue
		}
		// Compare the checksums.
		if s256 != nil && !bytes.Equal(s256.Sum(nil), remoteSHA256) {
			log.Printf("SHA256 checksum of %s does not match.\n", file.URL())
			continue
		}
		if s512 != nil && !bytes.Equal(s512.Sum(nil), remoteSHA512) {
			log.Printf("SHA512 checksum of %s does not match.\n", file.URL())
			continue
		}
		// Only check signature if we have loaded keys.
		if len(d.keys) > 0 {
			var sign *crypto.PGPSignature
			sign, signData, err = d.loadSignature(file.SignURL())
			if err != nil {
				if d.opts.Verbose {
					log.Printf("downloading signature '%s' failed: %v\n",
						file.SignURL(), err)
				}
			}
			if sign != nil {
				if !d.checkSignature(data.Bytes(), sign) {
					log.Printf("Cannot verify signature for %s\n", file.URL())
					continue
				}
			}
		}
		// Validate against CSAF schema.
		errors, err := csaf.ValidateCSAF(doc)
		if err != nil {
			log.Printf("Failed to validate %s: %v", file.URL(), err)
			continue
		}
		if len(errors) > 0 {
			log.Printf("CSAF file %s has %d validation errors.", file.URL(), len(errors))
			continue
		}
		// The initial release date decides the year sub-folder;
		// fall back to "now" if it cannot be extracted.
		if err := d.eval.Extract(`$.document.tracking.initial_release_date`, dateExtract, false, doc); err != nil {
			log.Printf("Cannot extract initial_release_date from advisory '%s'\n", file.URL())
			initialReleaseDate = time.Now()
		}
		initialReleaseDate = initialReleaseDate.UTC()
		// Write advisory to file
		newDir := path.Join(d.directory, lower, strconv.Itoa(initialReleaseDate.Year()))
		if newDir != lastDir {
			if err := os.MkdirAll(newDir, 0755); err != nil {
				return err
			}
			lastDir = newDir
		}
		// NOTE(review): this local shadows the imported "path"
		// package for the rest of the loop body; consider renaming.
		path := filepath.Join(lastDir, filename)
		if err := os.WriteFile(path, data.Bytes(), 0644); err != nil {
			return err
		}
		// Write hash sums.
		if s256Data != nil {
			if err := os.WriteFile(path+".sha256", s256Data, 0644); err != nil {
				return err
			}
		}
		if s512Data != nil {
			if err := os.WriteFile(path+".sha512", s512Data, 0644); err != nil {
				return err
			}
		}
		// Write signature.
		if signData != nil {
			if err := os.WriteFile(path+".asc", signData, 0644); err != nil {
				return err
			}
		}
		log.Printf("Written advisory '%s'.\n", path)
	}
	return nil
}
// checkSignature reports whether the detached signature sign over
// data can be verified with at least one of the loaded key rings.
func (d *downloader) checkSignature(data []byte, sign *crypto.PGPSignature) bool {
	msg := crypto.NewPlainMessage(data)
	now := crypto.GetUnixTime()
	for _, keyring := range d.keys {
		if keyring.VerifyDetached(msg, sign, now) == nil {
			return true
		}
	}
	return false
}
// loadSignature fetches an ASCII armored OpenPGP signature from p
// and returns the parsed signature together with its raw bytes.
func (d *downloader) loadSignature(p string) (*crypto.PGPSignature, []byte, error) {
	resp, err := d.httpClient().Get(p)
	if err != nil {
		return nil, nil, err
	}
	// Close the body in every case (also on non-200) so the
	// underlying connection can be reused.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, nil, fmt.Errorf(
			"fetching signature from '%s' failed: %s (%d)", p, resp.Status, resp.StatusCode)
	}
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, nil, err
	}
	sign, err := crypto.NewPGPSignatureFromArmored(string(data))
	if err != nil {
		return nil, nil, err
	}
	return sign, data, nil
}
// loadHash fetches a hash sum file (e.g. *.sha256) from p and returns
// the decoded hash together with the raw file content (which is later
// written next to the advisory).
func (d *downloader) loadHash(p string) ([]byte, []byte, error) {
	resp, err := d.httpClient().Get(p)
	if err != nil {
		return nil, nil, err
	}
	// Close the body in every case (also on non-200) so the
	// underlying connection can be reused.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, nil, fmt.Errorf(
			"fetching hash from '%s' failed: %s (%d)", p, resp.Status, resp.StatusCode)
	}
	// Keep a verbatim copy of the file while parsing it.
	var data bytes.Buffer
	tee := io.TeeReader(resp.Body, &data)
	hash, err := util.HashFromReader(tee)
	if err != nil {
		return nil, nil, err
	}
	return hash, data.Bytes(), nil
}
// prepareDirectory ensures that the working directory exists and
// records it in d.directory. Without an explicit configuration the
// current working directory is used.
func (d *downloader) prepareDirectory() error {
	// If no special given use current working directory.
	if d.opts.Directory == nil {
		cwd, err := os.Getwd()
		if err != nil {
			return err
		}
		d.directory = cwd
		return nil
	}
	// Use given directory; create it if it does not exist yet.
	dir := *d.opts.Directory
	switch _, err := os.Stat(dir); {
	case err == nil:
		// Already there.
	case os.IsNotExist(err):
		if err := os.MkdirAll(dir, 0755); err != nil {
			return err
		}
	default:
		return err
	}
	d.directory = dir
	return nil
}
// run performs the downloads for all the given domains.
// It stops at the first domain whose download fails.
func (d *downloader) run(domains []string) error {
	if err := d.prepareDirectory(); err != nil {
		return err
	}
	for i := range domains {
		if err := d.download(domains[i]); err != nil {
			return err
		}
	}
	return nil
}

View file

@ -0,0 +1,59 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package main
import (
"fmt"
"log"
"os"
"github.com/csaf-poc/csaf_distribution/util"
"github.com/jessevdk/go-flags"
)
// options are the command line options of csaf_downloader.
type options struct {
	// Directory is where the downloaded advisories are stored;
	// if unset the current working directory is used.
	Directory *string `short:"d" long:"directory" description:"Directory to store the downloaded files in"`
	// Insecure disables TLS certificate verification.
	Insecure bool `long:"insecure" description:"Do not check TLS certificates from provider"`
	// Version makes the program print its version and exit.
	Version bool `long:"version" description:"Display version of the binary"`
	// Verbose enables more diagnostic output.
	Verbose bool `long:"verbose" short:"v" description:"Verbose output"`
	// Rate limits HTTPS operations per second; nil means unlimited.
	Rate *float64 `long:"rate" short:"r" description:"The average upper limit of https operations per second"`
}
// errCheck terminates the program if err is non-nil.
// A help request from the flags parser exits successfully.
func errCheck(err error) {
	if err == nil {
		return
	}
	if flags.WroteHelp(err) {
		os.Exit(0)
	}
	log.Fatalf("error: %v\n", err)
}
// main parses the command line, handles --version and starts the
// download for all the given domains.
func main() {
	opts := new(options)
	parser := flags.NewParser(opts, flags.Default)
	parser.Usage = "[OPTIONS] domain..."

	domains, err := parser.Parse()
	errCheck(err)

	switch {
	case opts.Version:
		fmt.Println(util.SemVersion)
	case len(domains) == 0:
		log.Println("No domains given.")
	default:
		d := newDownloader(opts)
		errCheck(d.run(domains))
	}
}

View file

@ -167,6 +167,21 @@ func (c *controller) upload(r *http.Request) (interface{}, error) {
} }
} }
// Validate against remote validator
if c.cfg.RemoteValidator != nil {
validator, err := c.cfg.RemoteValidator.Open()
if err != nil {
return nil, err
}
valid, err := validator.Validate(content)
if err != nil {
return nil, err
}
if !valid {
return nil, errors.New("does not validate against remote validator")
}
}
ex, err := csaf.NewAdvisorySummary(util.NewPathEval(), content) ex, err := csaf.NewAdvisorySummary(util.NewPathEval(), content)
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -55,6 +55,7 @@ type config struct {
ProviderMetaData *providerMetadataConfig `toml:"provider_metadata"` ProviderMetaData *providerMetadataConfig `toml:"provider_metadata"`
UploadLimit *int64 `toml:"upload_limit"` UploadLimit *int64 `toml:"upload_limit"`
Issuer *string `toml:"issuer"` Issuer *string `toml:"issuer"`
RemoteValidator *csaf.RemoteValidatorOptions `toml:"remote_validator"`
} }
func (pmdc *providerMetadataConfig) apply(pmd *csaf.ProviderMetadata) { func (pmdc *providerMetadataConfig) apply(pmd *csaf.ProviderMetadata) {

View file

@ -13,6 +13,7 @@ import (
"log" "log"
"net/http" "net/http"
"net/http/cgi" "net/http/cgi"
"os"
"github.com/csaf-poc/csaf_distribution/util" "github.com/csaf-poc/csaf_distribution/util"
"github.com/jessevdk/go-flags" "github.com/jessevdk/go-flags"
@ -22,6 +23,16 @@ type options struct {
Version bool `long:"version" description:"Display version of the binary"` Version bool `long:"version" description:"Display version of the binary"`
} }
const cgiRequired = "The csaf_provider is a cgi binary and is designed to be served via a web server."
func ensureCGI() {
if _, ok := os.LookupEnv("REQUEST_METHOD"); !ok {
fmt.Println(cgiRequired)
fmt.Println("Version: " + util.SemVersion)
os.Exit(1)
}
}
func main() { func main() {
var opts options var opts options
parser := flags.NewParser(&opts, flags.Default) parser := flags.NewParser(&opts, flags.Default)
@ -31,6 +42,8 @@ func main() {
return return
} }
ensureCGI()
cfg, err := loadConfig() cfg, err := loadConfig()
if err != nil { if err != nil {
cgi.Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { cgi.Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {

View file

@ -355,7 +355,7 @@ func readInteractive(prompt string, pw **string) error {
func check(err error) { func check(err error) {
if err != nil { if err != nil {
if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp { if flags.WroteHelp(err) {
os.Exit(0) os.Exit(0)
} }
log.Fatalf("error: %v\n", err) log.Fatalf("error: %v\n", err)

274
csaf/advisories.go Normal file
View file

@ -0,0 +1,274 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package csaf
import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"net/url"
	"strings"

	"github.com/csaf-poc/csaf_distribution/util"
)
// AdvisoryFile constructs the urls of a remote file.
type AdvisoryFile interface {
	// URL returns the URL of the advisory itself.
	URL() string
	// SHA256URL returns the URL of the SHA256 hash file.
	SHA256URL() string
	// SHA512URL returns the URL of the SHA512 hash file.
	SHA512URL() string
	// SignURL returns the URL of the OpenPGP signature file.
	SignURL() string
}
// PlainAdvisoryFile is a simple implementation of AdvisoryFile.
// The hash and signature URLs are derived by appending the
// conventional suffixes to the advisory URL.
type PlainAdvisoryFile string

// URL returns the URL of this advisory.
func (f PlainAdvisoryFile) URL() string {
	return string(f)
}

// SHA256URL returns the URL of SHA256 hash file of this advisory.
func (f PlainAdvisoryFile) SHA256URL() string {
	return string(f) + ".sha256"
}

// SHA512URL returns the URL of SHA512 hash file of this advisory.
func (f PlainAdvisoryFile) SHA512URL() string {
	return string(f) + ".sha512"
}

// SignURL returns the URL of signature file of this advisory.
func (f PlainAdvisoryFile) SignURL() string {
	return string(f) + ".asc"
}
// HashedAdvisoryFile is a more involved implementation of AdvisoryFile.
// Each component (advisory, SHA256, SHA512, signature URL) can be given
// explicitly. A missing component is derived from the first one by
// appending the conventional suffix.
type HashedAdvisoryFile [4]string

// name returns component i, falling back to the advisory URL plus ext.
func (f HashedAdvisoryFile) name(i int, ext string) string {
	if f[i] == "" {
		return f[0] + ext
	}
	return f[i]
}

// URL returns the URL of this advisory.
func (f HashedAdvisoryFile) URL() string {
	return f[0]
}

// SHA256URL returns the URL of SHA256 hash file of this advisory.
func (f HashedAdvisoryFile) SHA256URL() string {
	return f.name(1, ".sha256")
}

// SHA512URL returns the URL of SHA512 hash file of this advisory.
func (f HashedAdvisoryFile) SHA512URL() string {
	return f.name(2, ".sha512")
}

// SignURL returns the URL of signature file of this advisory.
func (f HashedAdvisoryFile) SignURL() string {
	return f.name(3, ".asc")
}
// AdvisoryFileProcessor implements the extraction of
// advisory file names from a given provider metadata.
type AdvisoryFileProcessor struct {
	// client performs the HTTP(S) requests.
	client util.Client
	// expr evaluates JSON paths on the metadata document.
	expr *util.PathEval
	// doc is the provider metadata document.
	doc interface{}
	// base is the URL the metadata was loaded from; relative
	// links are resolved against it.
	base *url.URL
}
// NewAdvisoryFileProcessor constructs a filename extractor
// for a given metadata document.
func NewAdvisoryFileProcessor(
	client util.Client,
	expr *util.PathEval,
	doc interface{},
	base *url.URL,
) *AdvisoryFileProcessor {
	return &AdvisoryFileProcessor{
		client: client,
		expr: expr,
		doc: doc,
		base: base,
	}
}
// Process extracts the advisory filenames and passes them with
// the corresponding TLP label to fn. ROLIE feeds are preferred;
// without any feeds it falls back to loading index.txt.
func (afp *AdvisoryFileProcessor) Process(fn func(TLPLabel, []AdvisoryFile) error) error {
	// Check if we have ROLIE feeds.
	rolie, err := afp.expr.Eval(
		"$.distributions[*].rolie.feeds", afp.doc)
	if err != nil {
		log.Printf("rolie check failed: %v\n", err)
		return err
	}
	fs, hasRolie := rolie.([]interface{})
	hasRolie = hasRolie && len(fs) > 0
	if hasRolie {
		var feeds [][]Feed
		if err := util.ReMarshalJSON(&feeds, rolie); err != nil {
			return err
		}
		log.Printf("Found %d ROLIE feed(s).\n", len(feeds))
		for _, feed := range feeds {
			if err := afp.processROLIE(feed, fn); err != nil {
				return err
			}
		}
	} else {
		// No rolie feeds -> try to load files from index.txt
		files, err := afp.loadIndex()
		if err != nil {
			return err
		}
		// XXX: Is treating as white okay? better look into the advisories?
		if err := fn(TLPLabelWhite, files); err != nil {
			return err
		}
	} // TODO: else scan directories?
	return nil
}
// loadIndex loads baseURL/index.txt and returns a list of files
// prefixed by baseURL/.
func (afp *AdvisoryFileProcessor) loadIndex() ([]AdvisoryFile, error) {
	baseURL, err := util.BaseURL(afp.base)
	if err != nil {
		return nil, err
	}
	indexURL := baseURL + "/index.txt"
	resp, err := afp.client.Get(indexURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Without this check an error page (e.g. a 404) would be
	// parsed as a list of advisory files.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf(
			"fetching %s failed: %s (%d)", indexURL, resp.Status, resp.StatusCode)
	}
	var files []AdvisoryFile
	// One advisory path per line.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		files = append(files, PlainAdvisoryFile(baseURL+"/"+scanner.Text()))
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return files, nil
}
// processROLIE loads every feed in labeledFeeds, extracts the
// advisory files (with optional hash and signature links) from its
// entries and passes them together with the feed's TLP label to fn.
// Broken feeds are logged and skipped; only errors from fn abort.
func (afp *AdvisoryFileProcessor) processROLIE(
	labeledFeeds []Feed,
	fn func(TLPLabel, []AdvisoryFile) error,
) error {
	for i := range labeledFeeds {
		feed := &labeledFeeds[i]
		if feed.URL == nil {
			continue
		}
		up, err := url.Parse(string(*feed.URL))
		if err != nil {
			log.Printf("Invalid URL %s in feed: %v.", *feed.URL, err)
			continue
		}
		// The feed URL may be relative to the metadata document.
		feedURL := afp.base.ResolveReference(up)
		log.Printf("Feed URL: %s\n", feedURL)
		fb, err := util.BaseURL(feedURL)
		if err != nil {
			log.Printf("error: Invalid feed base URL '%s': %v\n", fb, err)
			continue
		}
		feedBaseURL, err := url.Parse(fb)
		if err != nil {
			log.Printf("error: Cannot parse feed base URL '%s': %v\n", fb, err)
			continue
		}
		res, err := afp.client.Get(feedURL.String())
		if err != nil {
			log.Printf("error: Cannot get feed '%s'\n", err)
			continue
		}
		if res.StatusCode != http.StatusOK {
			log.Printf("error: Fetching %s failed. Status code %d (%s)",
				feedURL, res.StatusCode, res.Status)
			continue
		}
		rfeed, err := func() (*ROLIEFeed, error) {
			defer res.Body.Close()
			return LoadROLIEFeed(res.Body)
		}()
		if err != nil {
			log.Printf("Loading ROLIE feed failed: %v.", err)
			continue
		}
		var files []AdvisoryFile
		// resolve resolves u against the feed's base URL;
		// an empty or invalid u yields "".
		resolve := func(u string) string {
			if u == "" {
				return ""
			}
			p, err := url.Parse(u)
			if err != nil {
				log.Printf("error: Invalid URL '%s': %v", u, err)
				return ""
			}
			return feedBaseURL.ResolveReference(p).String()
		}
		rfeed.Entries(func(entry *Entry) {
			// Collect the self link plus its hash/signature companions.
			var self, sha256, sha512, sign string
			for i := range entry.Link {
				link := &entry.Link[i]
				lower := strings.ToLower(link.HRef)
				switch link.Rel {
				case "self":
					self = resolve(link.HRef)
				case "signature":
					sign = resolve(link.HRef)
				case "hash":
					switch {
					case strings.HasSuffix(lower, ".sha256"):
						sha256 = resolve(link.HRef)
					case strings.HasSuffix(lower, ".sha512"):
						sha512 = resolve(link.HRef)
					}
				}
			}
			if self == "" {
				return
			}
			var file AdvisoryFile
			// Prefer the explicit companion URLs if any were given.
			if sha256 != "" || sha512 != "" || sign != "" {
				file = HashedAdvisoryFile{self, sha256, sha512, sign}
			} else {
				file = PlainAdvisoryFile(self)
			}
			files = append(files, file)
		})
		var label TLPLabel
		if feed.TLPLabel != nil {
			label = *feed.TLPLabel
		} else {
			// Feeds without an explicit label are reported as "unknown".
			label = "unknown"
		}
		if err := fn(label, files); err != nil {
			return err
		}
	}
	return nil
}

View file

@ -37,11 +37,11 @@ const (
) )
var tlpLabelPattern = alternativesUnmarshal( var tlpLabelPattern = alternativesUnmarshal(
string(TLPLabelUnlabeled), TLPLabelUnlabeled,
string(TLPLabelWhite), TLPLabelWhite,
string(TLPLabelGreen), TLPLabelGreen,
string(TLPLabelAmber), TLPLabelAmber,
string(TLPLabelRed), TLPLabelRed,
) )
// JSONURL is an URL to JSON document. // JSONURL is an URL to JSON document.
@ -313,7 +313,7 @@ func (a *Aggregator) Validate() error {
} }
} }
if a.LastUpdated == nil { if a.LastUpdated == nil {
return errors.New("Aggregator.LastUpdate == nil") return errors.New("aggregator.LastUpdate == nil")
} }
return nil return nil
} }

281
csaf/remotevalidation.go Normal file
View file

@ -0,0 +1,281 @@
// This file is Free Software under the MIT License
// without warranty, see README.md and LICENSES/MIT.txt for details.
//
// SPDX-License-Identifier: MIT
//
// SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
// Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
package csaf
import (
"bytes"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"net/http"
"sync"
bolt "go.etcd.io/bbolt"
)
// defaultURL is the default base URL of the validation service;
// validationPath is the REST endpoint appended to it.
const (
	defaultURL = "http://localhost:3000"
	validationPath = "/api/v1/validate"
)
// defaultPresets are the presets to check if none are configured.
var defaultPresets = []string{"mandatory"}
// validationsBucket is the bolt bucket holding cached results;
// validFalse/validTrue are the stored result byte values.
var (
	validationsBucket = []byte("validations")
	validFalse = []byte{0}
	validTrue = []byte{1}
)
// RemoteValidatorOptions are the configuration options of
// the remote validation service.
type RemoteValidatorOptions struct {
	// URL is the base URL of the service; empty means defaultURL.
	URL string `json:"url" toml:"url"`
	// Presets are the test presets to run; empty means defaultPresets.
	Presets []string `json:"presets" toml:"presets"`
	// Cache is the path of the bolt cache database; empty disables caching.
	Cache string `json:"cache" toml:"cache"`
}
// test names a single check to be run by the validation service.
type test struct {
	Type string `json:"type"`
	Name string `json:"name"`
}
// outDocument is the document sent to the remote validation service.
type outDocument struct {
	Tests []test `json:"tests"`
	Document interface{} `json:"document"`
}
// inDocument is the document received from the remote validation service.
type inDocument struct {
	Valid bool `json:"isValid"`
}
// errNotFound signals a cache miss.
var errNotFound = errors.New("not found")
// cache is the interface of the validation result cache.
type cache interface {
	// get returns the cached result for key or errNotFound.
	get(key []byte) (bool, error)
	// set stores the result for key.
	set(key []byte, valid bool) error
	Close() error
}
// RemoteValidator validates an advisory document remotely.
type RemoteValidator interface {
	Validate(doc interface{}) (bool, error)
	Close() error
}
// SynchronizedRemoteValidator returns a serialized variant of the
// given remote validator: every call is guarded by a mutex, so it is
// safe to share between goroutines.
func SynchronizedRemoteValidator(validator RemoteValidator) RemoteValidator {
	wrapped := &syncedRemoteValidator{RemoteValidator: validator}
	return wrapped
}
// remoteValidator is an implementation of a RemoteValidator.
type remoteValidator struct {
	// url is the full validation endpoint URL.
	url string
	// tests are the precompiled preset tests to request.
	tests []test
	// cache is the optional validation result cache (may be nil).
	cache cache
}
// syncedRemoteValidator is a serialized variant of a remote validator;
// the embedded mutex guards every call.
type syncedRemoteValidator struct {
	sync.Mutex
	RemoteValidator
}
// Validate implements the validation part of the RemoteValidator
// interface; the wrapped call is guarded by the mutex.
func (srv *syncedRemoteValidator) Validate(doc interface{}) (bool, error) {
	srv.Lock()
	defer srv.Unlock()
	return srv.RemoteValidator.Validate(doc)
}
// Close implements the closing part of the RemoteValidator
// interface; the wrapped call is guarded by the mutex.
func (srv *syncedRemoteValidator) Close() error {
	srv.Lock()
	defer srv.Unlock()
	return srv.RemoteValidator.Close()
}
// prepareTests precompiles the presets for the remote check.
// An empty preset list falls back to the defaults.
func prepareTests(presets []string) []test {
	if len(presets) == 0 {
		presets = defaultPresets
	}
	tests := make([]test, 0, len(presets))
	for _, name := range presets {
		tests = append(tests, test{Type: "preset", Name: name})
	}
	return tests
}
// prepareURL prepares the URL to be called for validation,
// falling back to the default service location if none is given.
func prepareURL(url string) string {
	base := url
	if base == "" {
		base = defaultURL
	}
	return base + validationPath
}
// prepareCache sets up the bolt backed validation cache if a
// database path is configured; otherwise no cache is used.
func prepareCache(config string) (cache, error) {
	if config == "" {
		return nil, nil
	}
	db, err := bolt.Open(config, 0600, nil)
	if err != nil {
		return nil, err
	}
	// Make sure the bucket for the validation results exists.
	ensure := func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists(validationsBucket)
		return err
	}
	if err := db.Update(ensure); err != nil {
		db.Close()
		return nil, err
	}
	return boltCache{db}, nil
}
// boltCache is a cache implementation based on the bolt datastore.
type boltCache struct{ *bolt.DB }

// get implements the fetch part of the cache interface.
// A missing key is reported as errNotFound.
func (bc boltCache) get(key []byte) (bool, error) {
	var (
		valid bool
		found bool
	)
	if err := bc.View(func(tx *bolt.Tx) error {
		if v := tx.Bucket(validationsBucket).Get(key); v != nil {
			valid, found = v[0] != 0, true
		}
		return nil
	}); err != nil {
		return false, err
	}
	if !found {
		return false, errNotFound
	}
	return valid, nil
}
// set implements the store part of the cache interface.
// The result is stored as a single byte (validTrue/validFalse).
func (bc boltCache) set(key []byte, valid bool) error {
	return bc.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(validationsBucket)
		if valid {
			return b.Put(key, validTrue)
		}
		return b.Put(key, validFalse)
	})
}
// Open opens a new remoteValidator configured by rvo,
// including its optional result cache.
func (rvo *RemoteValidatorOptions) Open() (RemoteValidator, error) {
	c, err := prepareCache(rvo.Cache)
	if err != nil {
		return nil, err
	}
	v := &remoteValidator{
		url:   prepareURL(rvo.URL),
		tests: prepareTests(rvo.Presets),
		cache: c,
	}
	return v, nil
}
// Close closes the remote validator and its cache, if one is set.
func (v *remoteValidator) Close() error {
	if v.cache == nil {
		return nil
	}
	return v.cache.Close()
}
// key calculates the cache key for an advisory document under the
// configured presets by hashing its JSON encoding and the test names.
func (v *remoteValidator) key(doc interface{}) ([]byte, error) {
	hash := sha256.New()
	if err := json.NewEncoder(hash).Encode(doc); err != nil {
		return nil, err
	}
	for _, t := range v.tests {
		if _, err := hash.Write([]byte(t.Name)); err != nil {
			return nil, err
		}
	}
	return hash.Sum(nil), nil
}
// Validate executes a remote validation of an advisory.
// When a cache is configured the result is served from and
// recorded in it.
func (v *remoteValidator) Validate(doc interface{}) (bool, error) {
	var key []byte
	if v.cache != nil {
		var err error
		if key, err = v.key(doc); err != nil {
			return false, err
		}
		valid, err := v.cache.get(key)
		if err != errNotFound {
			if err != nil {
				return false, err
			}
			// Cache hit.
			return valid, nil
		}
	}
	o := outDocument{
		Document: doc,
		Tests: v.tests,
	}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(&o); err != nil {
		return false, err
	}
	resp, err := http.Post(
		v.url,
		"application/json",
		bytes.NewReader(buf.Bytes()))
	if err != nil {
		return false, err
	}
	// Close the body in every case (also on non-200) so the
	// underlying connection can be reused.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return false, fmt.Errorf(
			"POST failed: %s (%d)", resp.Status, resp.StatusCode)
	}
	var in inDocument
	if err := json.NewDecoder(resp.Body).Decode(&in); err != nil {
		return false, err
	}
	valid := in.Valid
	if key != nil {
		// store in cache
		if err := v.cache.set(key, valid); err != nil {
			return valid, err
		}
	}
	return valid, nil
}

View file

@ -132,19 +132,6 @@ func (rf *ROLIEFeed) EntryByID(id string) *Entry {
return nil return nil
} }
// Files extracts the files from the feed.
func (rf *ROLIEFeed) Files(filter string) []string {
var files []string
for _, f := range rf.Feed.Entry {
for i := range f.Link {
if link := &f.Link[i]; link.Rel == filter {
files = append(files, link.HRef)
}
}
}
return files
}
// Entries visits the entries of this feed. // Entries visits the entries of this feed.
func (rf *ROLIEFeed) Entries(fn func(*Entry)) { func (rf *ROLIEFeed) Entries(fn func(*Entry)) {
for _, e := range rf.Feed.Entry { for _, e := range rf.Feed.Entry {

View file

@ -87,6 +87,7 @@ lock_file // path to lockfile, to stop other instances if one is not
interim_years // limiting the years for which interim documents are searched interim_years // limiting the years for which interim documents are searched
verbose // print more diagnostic output, e.g. https request verbose // print more diagnostic output, e.g. https request
allow_single_provider // debugging option allow_single_provider // debugging option
remote_validator // use remote validation checker
``` ```
Rates are specified as floats in HTTPS operations per second. Rates are specified as floats in HTTPS operations per second.

18
docs/csaf_downloader.md Normal file
View file

@ -0,0 +1,18 @@
## csaf_downloader
### Usage
```
Usage:
csaf_downloader [OPTIONS] domain...
Application Options:
-d, --directory= Directory to store the downloaded files in
--insecure Do not check TLS certificates from provider
--version Display version of the binary
-v, --verbose Verbose output
-r, --rate= The average upper limit of https operations per second
Help Options:
-h, --help Show this help message
```

View file

@ -1,8 +1,9 @@
`csaf_provider` implements the CGI interface for webservers `csaf_provider` implements the CGI interface for webservers
and reads its configuration from a TOML file. and reads its configuration from a [TOML](https://toml.io/en/) file.
The [setup docs](../README.md#setup-trusted-provider) The [setup docs](../README.md#setup-trusted-provider)
explain how to wire this up with nginx and where the config file lives. explain how to wire this up with nginx and where the config file lives.
## Provider options ## Provider options
Following options are supported in the config file: Following options are supported in the config file:
@ -27,4 +28,14 @@ Following options are supported in the config file:
- provider_metadata: Configure the provider metadata. - provider_metadata: Configure the provider metadata.
- provider_metadata.list_on_CSAF_aggregators: List on aggregators - provider_metadata.list_on_CSAF_aggregators: List on aggregators
- provider_metadata.mirror_on_CSAF_aggregators: Mirror on aggregators - provider_metadata.mirror_on_CSAF_aggregators: Mirror on aggregators
- provider_metadata.publisher: Set the publisher. Default: `{"category"= "vendor", "name"= "Example Company", "namespace"= "https://example.com"}`. - remote_validator: Use a remote validator service. Not used by default.
`{ "url" = "http://localhost:3000", "presets" = ["mandatory"], "cache" = "/var/lib/csaf/validations.db" }`
- provider_metadata.publisher: Set the publisher. Default:
```toml
[provider_metadata.publisher]
category = "vendor"
name = "Example Company"
namespace = "https://example.com"
issuing_authority = "We at Example Company are responsible for publishing and maintaining Product Y."
contact_details = "Example Company can be reached at contact_us@example.com, or via our website at https://www.example.com/contact."
```

View file

@ -109,7 +109,13 @@ sudo chmod g+r,o-rwx /usr/lib/csaf/config.toml
``` ```
<!-- MARKDOWN-AUTO-DOCS:END --> <!-- MARKDOWN-AUTO-DOCS:END -->
**This and the other settings are just examples, please adjust permissions and paths according to your webserver and security needs.** **This and the other settings are just examples,**
**please adjust permissions and paths**
**according to your webserver and security needs.**
Here is a minimal example configuration,
which you need to customize for a production setup,
see the [options of `csaf_provider`](https://github.com/csaf-poc/csaf_distribution/blob/main/docs/csaf_provider.md).
<!-- MARKDOWN-AUTO-DOCS:START (CODE:src=../docs/scripts/setupProviderForITest.sh&lines=94-99) --> <!-- MARKDOWN-AUTO-DOCS:START (CODE:src=../docs/scripts/setupProviderForITest.sh&lines=94-99) -->
<!-- The below code snippet is automatically added from ../docs/scripts/setupProviderForITest.sh --> <!-- The below code snippet is automatically added from ../docs/scripts/setupProviderForITest.sh -->
@ -122,7 +128,7 @@ canonical_url_prefix = "https://localhost:8443"
#no_passphrase = true #no_passphrase = true
``` ```
<!-- MARKDOWN-AUTO-DOCS:END --> <!-- MARKDOWN-AUTO-DOCS:END -->
with suitable [replacements](#provider-options)
**Attention:** You need to properly protect the private keys **Attention:** You need to properly protect the private keys
for the OpenPGP and TLS crypto setup. A few variants are possible for the OpenPGP and TLS crypto setup. A few variants are possible
@ -177,32 +183,6 @@ ln -s /etc/nginx/sites-available/{DNSNAME} /etc/nginx/sites-enabled/
``` ```
Replace {DNSNAME} with a server block file name. Replace {DNSNAME} with a server block file name.
## Provider options
Provider has many config options described as following:
- password: Authentication password for accessing the CSAF provider. This is
a simple authentication method useful for testing or as additional shareable password in combination with TLS client certificates.
- key: The private OpenPGP key.
- folder: Specify the root folder. Default: `/var/www/`.
- web: Specify the web folder. Default: `/var/www/html`.
- tlps: Set the allowed TLP comming with the upload request (one or more of "csaf", "white", "amber", "green", "red").
The "csaf" selection lets the provider takes the value from the CSAF document.
These affects the list items in the web interface.
Default: `["csaf", "white", "amber", "green", "red"]`.
- upload_signature: Send signature with the request, an additional input-field in the web interface will be shown to let user enter an ascii armored signature. Default: `false`.
- openpgp_url: URL to OpenPGP key-server. Default: `https://openpgp.circl.lu`.
- canonical_url_prefix: start of the URL where contents shall be accessible from the internet. Default: `https://$SERVER_NAME`.
- no_passphrase: Let user send the passphrase for the OpenPGP key with the request, if set to true the input-field in the web interface will not appear. Default: `false`.
- no_validation: Validate the uploaded CSAF document against the JSON schema. Default: `false`.
- no_web_ui: Disable the web interface. Default: `false`.
- dynamic_provider_metadata: Take the publisher from the CSAF document. Default: `false`.
- provider_metadata: Configure the provider metadata.
- provider_metadata.list_on_CSAF_aggregators: List on aggregators
- provider_metadata.mirror_on_CSAF_aggregators: Mirror on aggregators
- provider_metadata.publisher: Set the publisher. Default: `{"category"= "vendor", "name"= "Example", "namespace"= "https://example.com"}`.
- upload_limit: Set the upload limit size of the file. Default: `50 MiB`.
- issuer: The issuer of the CA, which if set, restricts the writing permission and the accessing to the web-interface to only the client certificates signed with this CA.
### Security considerations ### Security considerations

View file

@ -25,4 +25,5 @@ Calling example (as root):
./TLSClientConfigsForITest.sh ./TLSClientConfigsForITest.sh
./setupProviderForITest.sh ./setupProviderForITest.sh
./testAggregator.sh ./testAggregator.sh
./testDownloader.sh
``` ```

25
docs/scripts/testDownloader.sh Executable file
View file

@ -0,0 +1,25 @@
#!/usr/bin/env bash
# This file is Free Software under the MIT License
# without warranty, see README.md and LICENSES/MIT.txt for details.
#
# SPDX-License-Identifier: MIT
#
# SPDX-FileCopyrightText: 2022 German Federal Office for Information Security (BSI) <https://www.bsi.bund.de>
# Software-Engineering: 2022 Intevation GmbH <https://intevation.de>
set -e # to exit if a command in the script fails
echo
echo '==== run downloader'
cd ~/csaf_distribution
mkdir ~/downloaded1
./bin-linux-amd64/csaf_downloader --directory ../downloaded1 \
--rate 4.1 --verbose --insecure localhost
echo
echo '==== this was downloaded'
cd ~/downloaded1
find .

1
go.mod
View file

@ -12,6 +12,7 @@ require (
github.com/jessevdk/go-flags v1.5.0 github.com/jessevdk/go-flags v1.5.0
github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-homedir v1.1.0
github.com/santhosh-tekuri/jsonschema/v5 v5.0.0 github.com/santhosh-tekuri/jsonschema/v5 v5.0.0
go.etcd.io/bbolt v1.3.6
golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9 golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
golang.org/x/time v0.0.0-20220411224347-583f2d630306 golang.org/x/time v0.0.0-20220411224347-583f2d630306

3
go.sum
View file

@ -41,6 +41,8 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -66,6 +68,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View file

@ -13,12 +13,8 @@ import (
"strings" "strings"
) )
// BaseURL returns the base URL for a given URL p. // BaseURL returns the base URL for a given URL.
func BaseURL(p string) (string, error) { func BaseURL(u *url.URL) (string, error) {
u, err := url.Parse(p)
if err != nil {
return "", err
}
ep := u.EscapedPath() ep := u.EscapedPath()
if idx := strings.LastIndexByte(ep, '/'); idx != -1 { if idx := strings.LastIndexByte(ep, '/'); idx != -1 {
ep = ep[:idx+1] ep = ep[:idx+1]