diff options
author | Frédéric Guillot <fred@miniflux.net> | 2018-01-02 18:32:01 -0800 |
---|---|---|
committer | Frédéric Guillot <fred@miniflux.net> | 2018-01-02 18:32:01 -0800 |
commit | 3c3f397bf57e232201fccd64bad2820e6ade567a (patch) | |
tree | 5405fe0a5aca27c820bad873bb3e85188efc636d | |
parent | aae3d75490e5bddc7bae8d141d8e6f901c22fdd1 (diff) |
Make sure the scraper parses only HTML documents
-rw-r--r-- | reader/scraper/scraper.go | 11 |
1 file changed, 8 insertions, 3 deletions
diff --git a/reader/scraper/scraper.go b/reader/scraper/scraper.go index d2e0d4d..4f36d96 100644 --- a/reader/scraper/scraper.go +++ b/reader/scraper/scraper.go @@ -6,6 +6,7 @@ package scraper import ( "errors" + "fmt" "io" "strings" @@ -16,7 +17,7 @@ import ( "github.com/miniflux/miniflux/url" ) -// Fetch download a web page a returns relevant contents. +// Fetch downloads a web page a returns relevant contents. func Fetch(websiteURL, rules string) (string, error) { client := http.NewClient(websiteURL) response, err := client.Get() @@ -25,7 +26,11 @@ func Fetch(websiteURL, rules string) (string, error) { } if response.HasServerFailure() { - return "", errors.New("unable to download web page") + return "", errors.New("scraper: unable to download web page") + } + + if !strings.Contains(response.ContentType, "text/html") { + return "", fmt.Errorf("scraper: this resource is not a HTML document (%s)", response.ContentType) } page, err := response.NormalizeBodyEncoding() @@ -33,7 +38,7 @@ func Fetch(websiteURL, rules string) (string, error) { return "", err } - // The entry URL could be a redirect somewhere else. + // The entry URL could redirect somewhere else. websiteURL = response.EffectiveURL if rules == "" { |