Diffstat (limited to 'vendor/github.com/tdewolff/parse/css')
-rw-r--r--  vendor/github.com/tdewolff/parse/css/README.md       171
-rw-r--r--  vendor/github.com/tdewolff/parse/css/hash.go          689
-rw-r--r--  vendor/github.com/tdewolff/parse/css/hash_test.go      16
-rw-r--r--  vendor/github.com/tdewolff/parse/css/lex.go           710
-rw-r--r--  vendor/github.com/tdewolff/parse/css/lex_test.go      143
-rw-r--r--  vendor/github.com/tdewolff/parse/css/parse.go         402
-rw-r--r--  vendor/github.com/tdewolff/parse/css/parse_test.go    249
-rw-r--r--  vendor/github.com/tdewolff/parse/css/util.go            47
-rw-r--r--  vendor/github.com/tdewolff/parse/css/util_test.go       34
9 files changed, 0 insertions, 2461 deletions
diff --git a/vendor/github.com/tdewolff/parse/css/README.md b/vendor/github.com/tdewolff/parse/css/README.md
deleted file mode 100644
index 2013248..0000000
--- a/vendor/github.com/tdewolff/parse/css/README.md
+++ /dev/null
@@ -1,171 +0,0 @@
-# CSS [![GoDoc](http://godoc.org/github.com/tdewolff/parse/css?status.svg)](http://godoc.org/github.com/tdewolff/parse/css) [![GoCover](http://gocover.io/_badge/github.com/tdewolff/parse/css)](http://gocover.io/github.com/tdewolff/parse/css)
-
-This package is a CSS3 lexer and parser written in [Go][1]. Both follow the specification at [CSS Syntax Module Level 3](http://www.w3.org/TR/css-syntax-3/). The lexer takes an io.Reader and converts it into tokens until EOF. The parser returns a parse tree of the full io.Reader input stream, but the low-level `Next` function can be used for stream parsing; it returns grammar units one at a time until EOF.
-
-## Installation
-Run the following command
-
- go get github.com/tdewolff/parse/css
-
-or add the following import and run the project with `go get`
-
- import "github.com/tdewolff/parse/css"
-
-## Lexer
-### Usage
-The following initializes a new Lexer with io.Reader `r`:
-``` go
-l := css.NewLexer(r)
-```
-
-To tokenize until EOF or an error occurs, use:
-``` go
-for {
- tt, text := l.Next()
- switch tt {
- case css.ErrorToken:
- // error or EOF set in l.Err()
- return
- // ...
- }
-}
-```
-
-All tokens (see [CSS Syntax Module Level 3](http://www.w3.org/TR/css3-syntax/)):
-``` go
-ErrorToken // non-official token, returned when errors occur
-IdentToken
-FunctionToken // rgb( rgba( ...
-AtKeywordToken // @abc
-HashToken // #abc
-StringToken
-BadStringToken
-URLToken // url(
-BadURLToken
-DelimToken // any unmatched character
-NumberToken // 5
-PercentageToken // 5%
-DimensionToken // 5em
-UnicodeRangeToken
-IncludeMatchToken // ~=
-DashMatchToken // |=
-PrefixMatchToken // ^=
-SuffixMatchToken // $=
-SubstringMatchToken // *=
-ColumnToken // ||
-WhitespaceToken
-CDOToken // <!--
-CDCToken // -->
-ColonToken
-SemicolonToken
-CommaToken
-LeftBracketToken // [
-RightBracketToken // ]
-LeftParenthesisToken // (
-RightParenthesisToken // )
-LeftBraceToken // {
-RightBraceToken // }
-CommentToken // non-official token
-```
-
-### Examples
-``` go
-package main
-
-import (
- "fmt"
- "io"
- "os"
-
- "github.com/tdewolff/parse/css"
-)
-
-// Tokenize CSS3 from stdin.
-func main() {
- l := css.NewLexer(os.Stdin)
- for {
- tt, text := l.Next()
- switch tt {
- case css.ErrorToken:
- if l.Err() != io.EOF {
- fmt.Println("Error:", l.Err())
- }
- return
- case css.IdentToken:
- fmt.Println("Identifier", string(text))
- case css.NumberToken:
- fmt.Println("Number", string(text))
- // ...
- }
- }
-}
-```
-
-## Parser
-### Usage
-The following creates a new Parser.
-``` go
-// true because this is the content of an inline style attribute
-p := css.NewParser(bytes.NewBufferString("color: red;"), true)
-```
-
-To iterate over the stylesheet, use:
-``` go
-for {
- gt, _, data := p.Next()
- if gt == css.ErrorGrammar {
- break
- }
- // ...
-}
-```
-
-All grammar units returned by `Next`:
-``` go
-ErrorGrammar
-CommentGrammar
-AtRuleGrammar
-BeginAtRuleGrammar
-EndAtRuleGrammar
-QualifiedRuleGrammar
-BeginRulesetGrammar
-EndRulesetGrammar
-DeclarationGrammar
-TokenGrammar
-CustomPropertyGrammar
-```
-
-### Examples
-``` go
-package main
-
-import (
- "bytes"
- "fmt"
-
- "github.com/tdewolff/parse/css"
-)
-
-func main() {
- // true because this is the content of an inline style attribute
- p := css.NewParser(bytes.NewBufferString("color: red;"), true)
- out := ""
- for {
- gt, _, data := p.Next()
- if gt == css.ErrorGrammar {
- break
- } else if gt == css.AtRuleGrammar || gt == css.BeginAtRuleGrammar || gt == css.BeginRulesetGrammar || gt == css.DeclarationGrammar {
- out += string(data)
- if gt == css.DeclarationGrammar {
- out += ":"
- }
- for _, val := range p.Values() {
- out += string(val.Data)
- }
- if gt == css.BeginAtRuleGrammar || gt == css.BeginRulesetGrammar {
- out += "{"
- } else if gt == css.AtRuleGrammar || gt == css.DeclarationGrammar {
- out += ";"
- }
- } else {
- out += string(data)
- }
- }
- fmt.Println(out)
-}
-
-```
-
-## License
-Released under the [MIT license](https://github.com/tdewolff/parse/blob/master/LICENSE.md).
-
-[1]: http://golang.org/ "Go Language"
diff --git a/vendor/github.com/tdewolff/parse/css/hash.go b/vendor/github.com/tdewolff/parse/css/hash.go
deleted file mode 100644
index 77f383f..0000000
--- a/vendor/github.com/tdewolff/parse/css/hash.go
+++ /dev/null
@@ -1,689 +0,0 @@
-package css
-
-// generated by hasher -type=Hash -file=hash.go; DO NOT EDIT, except for adding more constants to the list and rerun go generate
-
-// uses github.com/tdewolff/hasher
-//go:generate hasher -type=Hash -file=hash.go
-
-// Hash defines perfect hashes for a predefined list of strings
-type Hash uint32
-
-// Unique hash definitions to be used instead of strings
-const (
- Accelerator Hash = 0x4b30b // accelerator
- Aliceblue Hash = 0x56109 // aliceblue
- Alpha Hash = 0x5eb05 // alpha
- Antiquewhite Hash = 0x4900c // antiquewhite
- Aquamarine Hash = 0x73a0a // aquamarine
- Azimuth Hash = 0x5ef07 // azimuth
- Background Hash = 0xa // background
- Background_Attachment Hash = 0x9115 // background-attachment
- Background_Color Hash = 0x11210 // background-color
- Background_Image Hash = 0x9ca10 // background-image
- Background_Position Hash = 0x13 // background-position
- Background_Position_X Hash = 0x84015 // background-position-x
- Background_Position_Y Hash = 0x15 // background-position-y
- Background_Repeat Hash = 0x1511 // background-repeat
- Behavior Hash = 0x3108 // behavior
- Black Hash = 0x5805 // black
- Blanchedalmond Hash = 0x5d0e // blanchedalmond
- Blueviolet Hash = 0x5660a // blueviolet
- Bold Hash = 0x7204 // bold
- Border Hash = 0x7d06 // border
- Border_Bottom Hash = 0x7d0d // border-bottom
- Border_Bottom_Color Hash = 0x7d13 // border-bottom-color
- Border_Bottom_Style Hash = 0xb413 // border-bottom-style
- Border_Bottom_Width Hash = 0xd713 // border-bottom-width
- Border_Collapse Hash = 0xf80f // border-collapse
- Border_Color Hash = 0x1480c // border-color
- Border_Left Hash = 0x15d0b // border-left
- Border_Left_Color Hash = 0x15d11 // border-left-color
- Border_Left_Style Hash = 0x18911 // border-left-style
- Border_Left_Width Hash = 0x19a11 // border-left-width
- Border_Right Hash = 0x1ab0c // border-right
- Border_Right_Color Hash = 0x1ab12 // border-right-color
- Border_Right_Style Hash = 0x1c912 // border-right-style
- Border_Right_Width Hash = 0x1db12 // border-right-width
- Border_Spacing Hash = 0x1ed0e // border-spacing
- Border_Style Hash = 0x2100c // border-style
- Border_Top Hash = 0x21c0a // border-top
- Border_Top_Color Hash = 0x21c10 // border-top-color
- Border_Top_Style Hash = 0x22c10 // border-top-style
- Border_Top_Width Hash = 0x23c10 // border-top-width
- Border_Width Hash = 0x24c0c // border-width
- Bottom Hash = 0x8406 // bottom
- Box_Shadow Hash = 0x2580a // box-shadow
- Burlywood Hash = 0x26b09 // burlywood
- Cadetblue Hash = 0x28a09 // cadetblue
- Calc Hash = 0x28704 // calc
- Caption_Side Hash = 0x2930c // caption-side
- Charset Hash = 0x47607 // charset
- Chartreuse Hash = 0x2a50a // chartreuse
- Chocolate Hash = 0x2af09 // chocolate
- Clear Hash = 0x2dd05 // clear
- Clip Hash = 0x2e204 // clip
- Color Hash = 0x8b05 // color
- Content Hash = 0x31e07 // content
- Cornflowerblue Hash = 0x3530e // cornflowerblue
- Cornsilk Hash = 0x36108 // cornsilk
- Counter_Increment Hash = 0x36911 // counter-increment
- Counter_Reset Hash = 0x3840d // counter-reset
- Cue Hash = 0x39103 // cue
- Cue_After Hash = 0x39109 // cue-after
- Cue_Before Hash = 0x39a0a // cue-before
- Cursive Hash = 0x3ab07 // cursive
- Cursor Hash = 0x3be06 // cursor
- Darkblue Hash = 0x6a08 // darkblue
- Darkcyan Hash = 0x7508 // darkcyan
- Darkgoldenrod Hash = 0x2730d // darkgoldenrod
- Darkgray Hash = 0x27f08 // darkgray
- Darkgreen Hash = 0x7ca09 // darkgreen
- Darkkhaki Hash = 0x8bd09 // darkkhaki
- Darkmagenta Hash = 0x5280b // darkmagenta
- Darkolivegreen Hash = 0x7590e // darkolivegreen
- Darkorange Hash = 0x7be0a // darkorange
- Darkorchid Hash = 0x8b40a // darkorchid
- Darksalmon Hash = 0x8f80a // darksalmon
- Darkseagreen Hash = 0x95c0c // darkseagreen
- Darkslateblue Hash = 0x3c40d // darkslateblue
- Darkslategray Hash = 0x3d10d // darkslategray
- Darkturquoise Hash = 0x3de0d // darkturquoise
- Darkviolet Hash = 0x3eb0a // darkviolet
- Deeppink Hash = 0x29d08 // deeppink
- Deepskyblue Hash = 0x8cb0b // deepskyblue
- Default Hash = 0x5b707 // default
- Direction Hash = 0xa2909 // direction
- Display Hash = 0x3f507 // display
- Document Hash = 0x40308 // document
- Dodgerblue Hash = 0x40b0a // dodgerblue
- Elevation Hash = 0x4d409 // elevation
- Empty_Cells Hash = 0x4f60b // empty-cells
- Fantasy Hash = 0x60a07 // fantasy
- Filter Hash = 0x5d406 // filter
- Firebrick Hash = 0x41509 // firebrick
- Flex Hash = 0x41e04 // flex
- Float Hash = 0x42205 // float
- Floralwhite Hash = 0x4270b // floralwhite
- Font Hash = 0xce04 // font
- Font_Face Hash = 0xce09 // font-face
- Font_Family Hash = 0x4510b // font-family
- Font_Size Hash = 0x45c09 // font-size
- Font_Size_Adjust Hash = 0x45c10 // font-size-adjust
- Font_Stretch Hash = 0x46c0c // font-stretch
- Font_Style Hash = 0x47d0a // font-style
- Font_Variant Hash = 0x4870c // font-variant
- Font_Weight Hash = 0x4a20b // font-weight
- Forestgreen Hash = 0x3a00b // forestgreen
- Fuchsia Hash = 0x4ad07 // fuchsia
- Gainsboro Hash = 0x17909 // gainsboro
- Ghostwhite Hash = 0x1fa0a // ghostwhite
- Goldenrod Hash = 0x27709 // goldenrod
- Greenyellow Hash = 0x7ce0b // greenyellow
- Height Hash = 0x6ae06 // height
- Honeydew Hash = 0x5f508 // honeydew
- Hsl Hash = 0xe903 // hsl
- Hsla Hash = 0xe904 // hsla
- Ime_Mode Hash = 0x8c508 // ime-mode
- Import Hash = 0x51706 // import
- Important Hash = 0x51709 // important
- Include_Source Hash = 0x82a0e // include-source
- Indianred Hash = 0x52009 // indianred
- Inherit Hash = 0x55507 // inherit
- Initial Hash = 0x55c07 // initial
- Keyframes Hash = 0x43509 // keyframes
- Lavender Hash = 0xeb08 // lavender
- Lavenderblush Hash = 0xeb0d // lavenderblush
- Lawngreen Hash = 0x50e09 // lawngreen
- Layer_Background_Color Hash = 0x10c16 // layer-background-color
- Layer_Background_Image Hash = 0x9c416 // layer-background-image
- Layout_Flow Hash = 0x5370b // layout-flow
- Layout_Grid Hash = 0x57b0b // layout-grid
- Layout_Grid_Char Hash = 0x57b10 // layout-grid-char
- Layout_Grid_Char_Spacing Hash = 0x57b18 // layout-grid-char-spacing
- Layout_Grid_Line Hash = 0x59310 // layout-grid-line
- Layout_Grid_Mode Hash = 0x5a910 // layout-grid-mode
- Layout_Grid_Type Hash = 0x5be10 // layout-grid-type
- Left Hash = 0x16404 // left
- Lemonchiffon Hash = 0xc50c // lemonchiffon
- Letter_Spacing Hash = 0x56d0e // letter-spacing
- Lightblue Hash = 0x5da09 // lightblue
- Lightcoral Hash = 0x5e30a // lightcoral
- Lightcyan Hash = 0x61109 // lightcyan
- Lightgoldenrodyellow Hash = 0x61a14 // lightgoldenrodyellow
- Lightgray Hash = 0x63909 // lightgray
- Lightgreen Hash = 0x6420a // lightgreen
- Lightpink Hash = 0x64c09 // lightpink
- Lightsalmon Hash = 0x6550b // lightsalmon
- Lightseagreen Hash = 0x6600d // lightseagreen
- Lightskyblue Hash = 0x66d0c // lightskyblue
- Lightslateblue Hash = 0x6790e // lightslateblue
- Lightsteelblue Hash = 0x6870e // lightsteelblue
- Lightyellow Hash = 0x6950b // lightyellow
- Limegreen Hash = 0x6a009 // limegreen
- Line_Break Hash = 0x59f0a // line-break
- Line_Height Hash = 0x6a90b // line-height
- Linear_Gradient Hash = 0x6b40f // linear-gradient
- List_Style Hash = 0x6c30a // list-style
- List_Style_Image Hash = 0x6c310 // list-style-image
- List_Style_Position Hash = 0x6d313 // list-style-position
- List_Style_Type Hash = 0x6e60f // list-style-type
- Magenta Hash = 0x52c07 // magenta
- Margin Hash = 0x2f206 // margin
- Margin_Bottom Hash = 0x2f20d // margin-bottom
- Margin_Left Hash = 0x2fe0b // margin-left
- Margin_Right Hash = 0x3310c // margin-right
- Margin_Top Hash = 0x8050a // margin-top
- Marker_Offset Hash = 0x6f50d // marker-offset
- Marks Hash = 0x70205 // marks
- Max_Height Hash = 0x7210a // max-height
- Max_Width Hash = 0x72b09 // max-width
- Media Hash = 0xa4c05 // media
- Mediumaquamarine Hash = 0x73410 // mediumaquamarine
- Mediumblue Hash = 0x7440a // mediumblue
- Mediumorchid Hash = 0x74e0c // mediumorchid
- Mediumpurple Hash = 0x7670c // mediumpurple
- Mediumseagreen Hash = 0x7730e // mediumseagreen
- Mediumslateblue Hash = 0x7810f // mediumslateblue
- Mediumspringgreen Hash = 0x79011 // mediumspringgreen
- Mediumturquoise Hash = 0x7a10f // mediumturquoise
- Mediumvioletred Hash = 0x7b00f // mediumvioletred
- Midnightblue Hash = 0x7de0c // midnightblue
- Min_Height Hash = 0x7ea0a // min-height
- Min_Width Hash = 0x7f409 // min-width
- Mintcream Hash = 0x7fd09 // mintcream
- Mistyrose Hash = 0x81b09 // mistyrose
- Moccasin Hash = 0x82408 // moccasin
- Monospace Hash = 0x8ff09 // monospace
- Namespace Hash = 0x4cc09 // namespace
- Navajowhite Hash = 0x4dc0b // navajowhite
- None Hash = 0x4f304 // none
- Normal Hash = 0x50906 // normal
- Olivedrab Hash = 0x83809 // olivedrab
- Orangered Hash = 0x7c209 // orangered
- Orphans Hash = 0x4bc07 // orphans
- Outline Hash = 0x85507 // outline
- Outline_Color Hash = 0x8550d // outline-color
- Outline_Style Hash = 0x8620d // outline-style
- Outline_Width Hash = 0x86f0d // outline-width
- Overflow Hash = 0xaa08 // overflow
- Overflow_X Hash = 0xaa0a // overflow-x
- Overflow_Y Hash = 0x87c0a // overflow-y
- Padding Hash = 0x2e507 // padding
- Padding_Bottom Hash = 0x2e50e // padding-bottom
- Padding_Left Hash = 0x5490c // padding-left
- Padding_Right Hash = 0x80e0d // padding-right
- Padding_Top Hash = 0x9110b // padding-top
- Page Hash = 0x88604 // page
- Page_Break_After Hash = 0x91b10 // page-break-after
- Page_Break_Before Hash = 0x88611 // page-break-before
- Page_Break_Inside Hash = 0x89711 // page-break-inside
- Palegoldenrod Hash = 0x8a80d // palegoldenrod
- Palegreen Hash = 0x8d609 // palegreen
- Paleturquoise Hash = 0x8df0d // paleturquoise
- Palevioletred Hash = 0x8ec0d // palevioletred
- Papayawhip Hash = 0x9080a // papayawhip
- Pause Hash = 0x92b05 // pause
- Pause_After Hash = 0x92b0b // pause-after
- Pause_Before Hash = 0x9360c // pause-before
- Peachpuff Hash = 0x5cc09 // peachpuff
- Pitch Hash = 0x94205 // pitch
- Pitch_Range Hash = 0x9420b // pitch-range
- Play_During Hash = 0x3f80b // play-during
- Position Hash = 0xb08 // position
- Powderblue Hash = 0x94d0a // powderblue
- Progid Hash = 0x95706 // progid
- Quotes Hash = 0x96806 // quotes
- Radial_Gradient Hash = 0x380f // radial-gradient
- Rgb Hash = 0x8f03 // rgb
- Rgba Hash = 0x8f04 // rgba
- Richness Hash = 0x12108 // richness
- Right Hash = 0x1b205 // right
- Rosybrown Hash = 0x18009 // rosybrown
- Royalblue Hash = 0x13f09 // royalblue
- Ruby_Align Hash = 0x1530a // ruby-align
- Ruby_Overhang Hash = 0x16d0d // ruby-overhang
- Ruby_Position Hash = 0x1bc0d // ruby-position
- Saddlebrown Hash = 0x4c20b // saddlebrown
- Sandybrown Hash = 0x5000a // sandybrown
- Sans_Serif Hash = 0x6010a // sans-serif
- Scrollbar_3d_Light_Color Hash = 0x12818 // scrollbar-3d-light-color
- Scrollbar_Arrow_Color Hash = 0x2c815 // scrollbar-arrow-color
- Scrollbar_Base_Color Hash = 0x43d14 // scrollbar-base-color
- Scrollbar_Dark_Shadow_Color Hash = 0x7061b // scrollbar-dark-shadow-color
- Scrollbar_Face_Color Hash = 0x96d14 // scrollbar-face-color
- Scrollbar_Highlight_Color Hash = 0xa0619 // scrollbar-highlight-color
- Scrollbar_Shadow_Color Hash = 0x98116 // scrollbar-shadow-color
- Scrollbar_Track_Color Hash = 0x99715 // scrollbar-track-color
- Seagreen Hash = 0x66508 // seagreen
- Seashell Hash = 0x10508 // seashell
- Serif Hash = 0x60605 // serif
- Size Hash = 0x46104 // size
- Slateblue Hash = 0x3c809 // slateblue
- Slategray Hash = 0x3d509 // slategray
- Speak Hash = 0x9ac05 // speak
- Speak_Header Hash = 0x9ac0c // speak-header
- Speak_Numeral Hash = 0x9b80d // speak-numeral
- Speak_Punctuation Hash = 0x9da11 // speak-punctuation
- Speech_Rate Hash = 0x9eb0b // speech-rate
- Springgreen Hash = 0x7960b // springgreen
- Steelblue Hash = 0x68c09 // steelblue
- Stress Hash = 0x2c306 // stress
- Supports Hash = 0x9ff08 // supports
- Table_Layout Hash = 0x5310c // table-layout
- Text_Align Hash = 0x2b60a // text-align
- Text_Align_Last Hash = 0x2b60f // text-align-last
- Text_Autospace Hash = 0x2020e // text-autospace
- Text_Decoration Hash = 0x4e50f // text-decoration
- Text_Indent Hash = 0x9f40b // text-indent
- Text_Justify Hash = 0x250c // text-justify
- Text_Kashida_Space Hash = 0x4612 // text-kashida-space
- Text_Overflow Hash = 0xa50d // text-overflow
- Text_Shadow Hash = 0x3080b // text-shadow
- Text_Transform Hash = 0x3240e // text-transform
- Text_Underline_Position Hash = 0x33c17 // text-underline-position
- Top Hash = 0x22303 // top
- Transparent Hash = 0x3790b // transparent
- Turquoise Hash = 0x3e209 // turquoise
- Unicode_Bidi Hash = 0xa1f0c // unicode-bidi
- Vertical_Align Hash = 0x3b00e // vertical-align
- Visibility Hash = 0xa320a // visibility
- Voice_Family Hash = 0xa3c0c // voice-family
- Volume Hash = 0xa4806 // volume
- White Hash = 0x1ff05 // white
- White_Space Hash = 0x4970b // white-space
- Whitesmoke Hash = 0x42d0a // whitesmoke
- Widows Hash = 0x5fc06 // widows
- Width Hash = 0xe505 // width
- Word_Break Hash = 0x2610a // word-break
- Word_Spacing Hash = 0x3120c // word-spacing
- Word_Wrap Hash = 0x54109 // word-wrap
- Writing_Mode Hash = 0x62d0c // writing-mode
- Yellow Hash = 0x62806 // yellow
- Yellowgreen Hash = 0x7d30b // yellowgreen
- Z_Index Hash = 0xa5107 // z-index
-)
-
-// String returns the hash's name.
-func (i Hash) String() string {
- start := uint32(i >> 8)
- n := uint32(i & 0xff)
- if start+n > uint32(len(_Hash_text)) {
- return ""
- }
- return _Hash_text[start : start+n]
-}
-
-// ToHash returns the hash whose name is s. It returns zero if there is no
-// such hash. It is case sensitive.
-func ToHash(s []byte) Hash {
- if len(s) == 0 || len(s) > _Hash_maxLen {
- return 0
- }
- h := uint32(_Hash_hash0)
- for i := 0; i < len(s); i++ {
- h ^= uint32(s[i])
- h *= 16777619
- }
- if i := _Hash_table[h&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
- t := _Hash_text[i>>8 : i>>8+i&0xff]
- for i := 0; i < len(s); i++ {
- if t[i] != s[i] {
- goto NEXT
- }
- }
- return i
- }
-NEXT:
- if i := _Hash_table[(h>>16)&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
- t := _Hash_text[i>>8 : i>>8+i&0xff]
- for i := 0; i < len(s); i++ {
- if t[i] != s[i] {
- return 0
- }
- }
- return i
- }
- return 0
-}
-
-const _Hash_hash0 = 0x700e0976
-const _Hash_maxLen = 27
-const _Hash_text = "background-position-ybackground-repeatext-justifybehavioradi" +
- "al-gradientext-kashida-spaceblackblanchedalmondarkblueboldar" +
- "kcyanborder-bottom-colorgbackground-attachmentext-overflow-x" +
- "border-bottom-stylemonchiffont-faceborder-bottom-widthslaven" +
- "derblushborder-collapseashellayer-background-colorichnesscro" +
- "llbar-3d-light-coloroyalblueborder-coloruby-alignborder-left" +
- "-coloruby-overhangainsborosybrownborder-left-styleborder-lef" +
- "t-widthborder-right-coloruby-positionborder-right-styleborde" +
- "r-right-widthborder-spacinghostwhitext-autospaceborder-style" +
- "border-top-colorborder-top-styleborder-top-widthborder-width" +
- "box-shadoword-breakburlywoodarkgoldenrodarkgraycalcadetbluec" +
- "aption-sideeppinkchartreusechocolatext-align-lastresscrollba" +
- "r-arrow-colorclearclipadding-bottomargin-bottomargin-leftext" +
- "-shadoword-spacingcontentext-transformargin-rightext-underli" +
- "ne-positioncornflowerbluecornsilkcounter-incrementransparent" +
- "counter-resetcue-aftercue-beforestgreencursivertical-aligncu" +
- "rsordarkslatebluedarkslategraydarkturquoisedarkvioletdisplay" +
- "-duringdocumentdodgerbluefirebrickflexfloatfloralwhitesmokey" +
- "framescrollbar-base-colorfont-familyfont-size-adjustfont-str" +
- "etcharsetfont-stylefont-variantiquewhite-spacefont-weightfuc" +
- "hsiacceleratorphansaddlebrownamespacelevationavajowhitext-de" +
- "corationonempty-cellsandybrownormalawngreenimportantindianre" +
- "darkmagentable-layout-floword-wrapadding-leftinheritinitiali" +
- "cebluevioletter-spacinglayout-grid-char-spacinglayout-grid-l" +
- "ine-breaklayout-grid-modefaultlayout-grid-typeachpuffilterli" +
- "ghtbluelightcoralphazimuthoneydewidowsans-serifantasylightcy" +
- "anlightgoldenrodyellowriting-modelightgraylightgreenlightpin" +
- "klightsalmonlightseagreenlightskybluelightslatebluelightstee" +
- "lbluelightyellowlimegreenline-heightlinear-gradientlist-styl" +
- "e-imagelist-style-positionlist-style-typemarker-offsetmarksc" +
- "rollbar-dark-shadow-colormax-heightmax-widthmediumaquamarine" +
- "mediumbluemediumorchidarkolivegreenmediumpurplemediumseagree" +
- "nmediumslatebluemediumspringgreenmediumturquoisemediumviolet" +
- "redarkorangeredarkgreenyellowgreenmidnightbluemin-heightmin-" +
- "widthmintcreamargin-topadding-rightmistyrosemoccasinclude-so" +
- "urceolivedrabackground-position-xoutline-coloroutline-styleo" +
- "utline-widthoverflow-ypage-break-beforepage-break-insidepale" +
- "goldenrodarkorchidarkkhakime-modeepskybluepalegreenpaleturqu" +
- "oisepalevioletredarksalmonospacepapayawhipadding-topage-brea" +
- "k-afterpause-afterpause-beforepitch-rangepowderblueprogidark" +
- "seagreenquotescrollbar-face-colorscrollbar-shadow-colorscrol" +
- "lbar-track-colorspeak-headerspeak-numeralayer-background-ima" +
- "gespeak-punctuationspeech-ratext-indentsupportscrollbar-high" +
- "light-colorunicode-bidirectionvisibilityvoice-familyvolumedi" +
- "az-index"
-
-var _Hash_table = [1 << 9]Hash{
- 0x0: 0x5000a, // sandybrown
- 0x1: 0x22303, // top
- 0x4: 0x13f09, // royalblue
- 0x6: 0x4e50f, // text-decoration
- 0xb: 0x5370b, // layout-flow
- 0xc: 0x11210, // background-color
- 0xd: 0x8406, // bottom
- 0x10: 0x6600d, // lightseagreen
- 0x11: 0x8cb0b, // deepskyblue
- 0x12: 0x3c809, // slateblue
- 0x13: 0x4f60b, // empty-cells
- 0x14: 0x2e204, // clip
- 0x15: 0x7440a, // mediumblue
- 0x16: 0x4cc09, // namespace
- 0x18: 0x2f20d, // margin-bottom
- 0x1a: 0x1480c, // border-color
- 0x1b: 0x5f508, // honeydew
- 0x1d: 0x24c0c, // border-width
- 0x1e: 0x9ac0c, // speak-header
- 0x1f: 0x8ec0d, // palevioletred
- 0x20: 0x1ed0e, // border-spacing
- 0x22: 0x2e507, // padding
- 0x23: 0x3310c, // margin-right
- 0x27: 0x7f409, // min-width
- 0x29: 0x8f03, // rgb
- 0x2a: 0x6950b, // lightyellow
- 0x2c: 0x91b10, // page-break-after
- 0x2d: 0x31e07, // content
- 0x30: 0x250c, // text-justify
- 0x32: 0x2b60f, // text-align-last
- 0x34: 0x96d14, // scrollbar-face-color
- 0x35: 0x43509, // keyframes
- 0x36: 0x27f08, // darkgray
- 0x37: 0x52c07, // magenta
- 0x38: 0x3d509, // slategray
- 0x3a: 0x9ca10, // background-image
- 0x3c: 0x82a0e, // include-source
- 0x3d: 0x68c09, // steelblue
- 0x3e: 0x8550d, // outline-color
- 0x40: 0xf80f, // border-collapse
- 0x41: 0xeb08, // lavender
- 0x42: 0x9ff08, // supports
- 0x44: 0x6a90b, // line-height
- 0x45: 0x9da11, // speak-punctuation
- 0x46: 0xa320a, // visibility
- 0x47: 0x2dd05, // clear
- 0x4b: 0x5660a, // blueviolet
- 0x4e: 0x5b707, // default
- 0x50: 0x6f50d, // marker-offset
- 0x52: 0x36911, // counter-increment
- 0x53: 0x6790e, // lightslateblue
- 0x54: 0x10508, // seashell
- 0x56: 0x1bc0d, // ruby-position
- 0x57: 0x8620d, // outline-style
- 0x58: 0x66508, // seagreen
- 0x59: 0x8b05, // color
- 0x5c: 0x2930c, // caption-side
- 0x5d: 0x6ae06, // height
- 0x5e: 0x7810f, // mediumslateblue
- 0x5f: 0x9360c, // pause-before
- 0x60: 0xc50c, // lemonchiffon
- 0x63: 0x3ab07, // cursive
- 0x66: 0x4dc0b, // navajowhite
- 0x67: 0xa3c0c, // voice-family
- 0x68: 0x2730d, // darkgoldenrod
- 0x69: 0x41509, // firebrick
- 0x6a: 0x47d0a, // font-style
- 0x6b: 0xa2909, // direction
- 0x6d: 0x7be0a, // darkorange
- 0x6f: 0x4870c, // font-variant
- 0x70: 0x2f206, // margin
- 0x71: 0x88611, // page-break-before
- 0x73: 0xa50d, // text-overflow
- 0x74: 0x4612, // text-kashida-space
- 0x75: 0x36108, // cornsilk
- 0x76: 0x4a20b, // font-weight
- 0x77: 0x46104, // size
- 0x78: 0x57b0b, // layout-grid
- 0x79: 0x9110b, // padding-top
- 0x7a: 0x47607, // charset
- 0x7d: 0x81b09, // mistyrose
- 0x7e: 0x5ef07, // azimuth
- 0x7f: 0x92b0b, // pause-after
- 0x83: 0x28704, // calc
- 0x84: 0x3be06, // cursor
- 0x85: 0xe903, // hsl
- 0x86: 0x56d0e, // letter-spacing
- 0x88: 0x7ca09, // darkgreen
- 0x8b: 0x40308, // document
- 0x8d: 0x39109, // cue-after
- 0x8f: 0x39a0a, // cue-before
- 0x91: 0x60a07, // fantasy
- 0x94: 0x16d0d, // ruby-overhang
- 0x95: 0x2e50e, // padding-bottom
- 0x9a: 0x5da09, // lightblue
- 0x9c: 0x8f80a, // darksalmon
- 0x9d: 0x45c10, // font-size-adjust
- 0x9e: 0x64c09, // lightpink
- 0xa0: 0x95c0c, // darkseagreen
- 0xa2: 0x89711, // page-break-inside
- 0xa4: 0x27709, // goldenrod
- 0xa5: 0x63909, // lightgray
- 0xa6: 0xa4c05, // media
- 0xa7: 0x57b18, // layout-grid-char-spacing
- 0xa9: 0x51709, // important
- 0xaa: 0x7ea0a, // min-height
- 0xb0: 0x15d11, // border-left-color
- 0xb1: 0x88604, // page
- 0xb2: 0x9c416, // layer-background-image
- 0xb5: 0x59310, // layout-grid-line
- 0xb6: 0x1511, // background-repeat
- 0xb7: 0x7d13, // border-bottom-color
- 0xb9: 0x2580a, // box-shadow
- 0xbb: 0x5490c, // padding-left
- 0xbc: 0x1b205, // right
- 0xc0: 0x43d14, // scrollbar-base-color
- 0xc1: 0x41e04, // flex
- 0xc2: 0xe505, // width
- 0xc5: 0x3e209, // turquoise
- 0xc8: 0x42205, // float
- 0xca: 0x1530a, // ruby-align
- 0xcb: 0xb08, // position
- 0xcc: 0x8050a, // margin-top
- 0xce: 0x2fe0b, // margin-left
- 0xcf: 0x3080b, // text-shadow
- 0xd0: 0x2610a, // word-break
- 0xd4: 0x42d0a, // whitesmoke
- 0xd6: 0x33c17, // text-underline-position
- 0xd7: 0x1db12, // border-right-width
- 0xd8: 0x83809, // olivedrab
- 0xd9: 0x8d609, // palegreen
- 0xdb: 0x51706, // import
- 0xdc: 0x70205, // marks
- 0xdd: 0x3eb0a, // darkviolet
- 0xde: 0x13, // background-position
- 0xe0: 0x73410, // mediumaquamarine
- 0xe1: 0x7204, // bold
- 0xe2: 0x7a10f, // mediumturquoise
- 0xe4: 0x8a80d, // palegoldenrod
- 0xe5: 0x5280b, // darkmagenta
- 0xe6: 0x18009, // rosybrown
- 0xe7: 0x19a11, // border-left-width
- 0xe8: 0x8bd09, // darkkhaki
- 0xea: 0x5d0e, // blanchedalmond
- 0xeb: 0x55c07, // initial
- 0xec: 0x7061b, // scrollbar-dark-shadow-color
- 0xee: 0x4c20b, // saddlebrown
- 0xef: 0x8df0d, // paleturquoise
- 0xf1: 0x1ab12, // border-right-color
- 0xf3: 0x1ff05, // white
- 0xf7: 0xa0619, // scrollbar-highlight-color
- 0xf9: 0x5a910, // layout-grid-mode
- 0xfc: 0x2100c, // border-style
- 0xfe: 0x6d313, // list-style-position
- 0x100: 0x10c16, // layer-background-color
- 0x102: 0x5be10, // layout-grid-type
- 0x103: 0x15d0b, // border-left
- 0x104: 0xaa08, // overflow
- 0x105: 0x7de0c, // midnightblue
- 0x10b: 0x2b60a, // text-align
- 0x10e: 0x22c10, // border-top-style
- 0x110: 0x61a14, // lightgoldenrodyellow
- 0x114: 0x7d06, // border
- 0x119: 0xce04, // font
- 0x11c: 0x73a0a, // aquamarine
- 0x11d: 0x6420a, // lightgreen
- 0x11e: 0x62806, // yellow
- 0x120: 0x9ac05, // speak
- 0x121: 0x4970b, // white-space
- 0x123: 0x3c40d, // darkslateblue
- 0x125: 0x2020e, // text-autospace
- 0x128: 0xeb0d, // lavenderblush
- 0x12c: 0x6550b, // lightsalmon
- 0x12d: 0x55507, // inherit
- 0x131: 0x8b40a, // darkorchid
- 0x132: 0x21c0a, // border-top
- 0x133: 0x3f80b, // play-during
- 0x137: 0x23c10, // border-top-width
- 0x139: 0x4bc07, // orphans
- 0x13a: 0x4510b, // font-family
- 0x13d: 0x40b0a, // dodgerblue
- 0x13f: 0x9080a, // papayawhip
- 0x140: 0x92b05, // pause
- 0x142: 0x6b40f, // linear-gradient
- 0x143: 0x3530e, // cornflowerblue
- 0x144: 0x3f507, // display
- 0x146: 0x56109, // aliceblue
- 0x14a: 0x6a08, // darkblue
- 0x14b: 0x3108, // behavior
- 0x14c: 0x3840d, // counter-reset
- 0x14d: 0x7ce0b, // greenyellow
- 0x14e: 0x79011, // mediumspringgreen
- 0x14f: 0x94d0a, // powderblue
- 0x150: 0x57b10, // layout-grid-char
- 0x158: 0x85507, // outline
- 0x159: 0x26b09, // burlywood
- 0x15b: 0xd713, // border-bottom-width
- 0x15c: 0x4f304, // none
- 0x15e: 0x39103, // cue
- 0x15f: 0x5310c, // table-layout
- 0x160: 0x9420b, // pitch-range
- 0x161: 0xa5107, // z-index
- 0x162: 0x2c306, // stress
- 0x163: 0x84015, // background-position-x
- 0x165: 0x50906, // normal
- 0x167: 0x7670c, // mediumpurple
- 0x169: 0x5e30a, // lightcoral
- 0x16c: 0x7210a, // max-height
- 0x16d: 0x8f04, // rgba
- 0x16e: 0x6c310, // list-style-image
- 0x170: 0x29d08, // deeppink
- 0x173: 0x95706, // progid
- 0x175: 0x7960b, // springgreen
- 0x176: 0x3a00b, // forestgreen
- 0x178: 0x3790b, // transparent
- 0x179: 0x82408, // moccasin
- 0x17a: 0x7b00f, // mediumvioletred
- 0x17e: 0x9f40b, // text-indent
- 0x181: 0x6e60f, // list-style-type
- 0x182: 0x17909, // gainsboro
- 0x183: 0x3de0d, // darkturquoise
- 0x184: 0x3d10d, // darkslategray
- 0x189: 0xaa0a, // overflow-x
- 0x18b: 0x96806, // quotes
- 0x18c: 0x9115, // background-attachment
- 0x18f: 0x1ab0c, // border-right
- 0x191: 0x5805, // black
- 0x192: 0x7d30b, // yellowgreen
- 0x194: 0x5cc09, // peachpuff
- 0x197: 0x4270b, // floralwhite
- 0x19c: 0x7590e, // darkolivegreen
- 0x19d: 0x54109, // word-wrap
- 0x19e: 0x18911, // border-left-style
- 0x1a0: 0x9eb0b, // speech-rate
- 0x1a1: 0x86f0d, // outline-width
- 0x1a2: 0xa1f0c, // unicode-bidi
- 0x1a3: 0x6c30a, // list-style
- 0x1a4: 0x94205, // pitch
- 0x1a5: 0x99715, // scrollbar-track-color
- 0x1a6: 0x4ad07, // fuchsia
- 0x1a8: 0x3b00e, // vertical-align
- 0x1ad: 0x5eb05, // alpha
- 0x1ae: 0x72b09, // max-width
- 0x1af: 0x12108, // richness
- 0x1b0: 0x380f, // radial-gradient
- 0x1b1: 0x80e0d, // padding-right
- 0x1b2: 0x2c815, // scrollbar-arrow-color
- 0x1b3: 0x16404, // left
- 0x1b5: 0x4d409, // elevation
- 0x1b6: 0x59f0a, // line-break
- 0x1ba: 0x2af09, // chocolate
- 0x1bb: 0x9b80d, // speak-numeral
- 0x1bd: 0x4b30b, // accelerator
- 0x1be: 0x6a009, // limegreen
- 0x1c1: 0x7508, // darkcyan
- 0x1c3: 0x66d0c, // lightskyblue
- 0x1c5: 0x6010a, // sans-serif
- 0x1c6: 0x7d0d, // border-bottom
- 0x1c7: 0xa, // background
- 0x1c8: 0xa4806, // volume
- 0x1ca: 0x62d0c, // writing-mode
- 0x1cb: 0x12818, // scrollbar-3d-light-color
- 0x1cc: 0x5fc06, // widows
- 0x1cf: 0x45c09, // font-size
- 0x1d0: 0x15, // background-position-y
- 0x1d1: 0x61109, // lightcyan
- 0x1d4: 0x52009, // indianred
- 0x1d7: 0x1fa0a, // ghostwhite
- 0x1db: 0x7c209, // orangered
- 0x1dc: 0x4900c, // antiquewhite
- 0x1dd: 0x50e09, // lawngreen
- 0x1df: 0x7730e, // mediumseagreen
- 0x1e0: 0x21c10, // border-top-color
- 0x1e2: 0xe904, // hsla
- 0x1e4: 0x3240e, // text-transform
- 0x1e6: 0x74e0c, // mediumorchid
- 0x1e9: 0x8ff09, // monospace
- 0x1ec: 0x98116, // scrollbar-shadow-color
- 0x1ed: 0x6870e, // lightsteelblue
- 0x1ef: 0x28a09, // cadetblue
- 0x1f0: 0x5d406, // filter
- 0x1f1: 0x1c912, // border-right-style
- 0x1f6: 0x87c0a, // overflow-y
- 0x1f7: 0xce09, // font-face
- 0x1f8: 0x3120c, // word-spacing
- 0x1fa: 0xb413, // border-bottom-style
- 0x1fb: 0x46c0c, // font-stretch
- 0x1fc: 0x7fd09, // mintcream
- 0x1fd: 0x8c508, // ime-mode
- 0x1fe: 0x2a50a, // chartreuse
- 0x1ff: 0x60605, // serif
-}
diff --git a/vendor/github.com/tdewolff/parse/css/hash_test.go b/vendor/github.com/tdewolff/parse/css/hash_test.go
deleted file mode 100644
index e176cc1..0000000
--- a/vendor/github.com/tdewolff/parse/css/hash_test.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package css // import "github.com/tdewolff/parse/css"
-
-import (
- "testing"
-
- "github.com/tdewolff/test"
-)
-
-func TestHashTable(t *testing.T) {
- test.T(t, ToHash([]byte("font")), Font, "'font' must resolve to hash.Font")
- test.T(t, Font.String(), "font")
- test.T(t, Margin_Left.String(), "margin-left")
- test.T(t, ToHash([]byte("")), Hash(0), "empty string must resolve to zero")
- test.T(t, Hash(0xffffff).String(), "")
- test.T(t, ToHash([]byte("fonts")), Hash(0), "'fonts' must resolve to zero")
-}
diff --git a/vendor/github.com/tdewolff/parse/css/lex.go b/vendor/github.com/tdewolff/parse/css/lex.go
deleted file mode 100644
index 3924bb7..0000000
--- a/vendor/github.com/tdewolff/parse/css/lex.go
+++ /dev/null
@@ -1,710 +0,0 @@
-// Package css is a CSS3 lexer and parser following the specifications at http://www.w3.org/TR/css-syntax-3/.
-package css // import "github.com/tdewolff/parse/css"
-
-// TODO: use the \uFFFD replacement character for NULL bytes in strings, for example, or at least don't end the string early
-
-import (
- "bytes"
- "io"
- "strconv"
-
- "github.com/tdewolff/parse"
- "github.com/tdewolff/parse/buffer"
-)
-
-// TokenType determines the type of token, e.g. a number or a semicolon.
-type TokenType uint32
-
-// TokenType values.
-const (
- ErrorToken TokenType = iota // extra token when errors occur
- IdentToken
- FunctionToken // rgb( rgba( ...
- AtKeywordToken // @abc
- HashToken // #abc
- StringToken
- BadStringToken
- URLToken
- BadURLToken
- DelimToken // any unmatched character
- NumberToken // 5
- PercentageToken // 5%
- DimensionToken // 5em
- UnicodeRangeToken // U+554A
- IncludeMatchToken // ~=
- DashMatchToken // |=
- PrefixMatchToken // ^=
- SuffixMatchToken // $=
- SubstringMatchToken // *=
- ColumnToken // ||
- WhitespaceToken // space \t \r \n \f
- CDOToken // <!--
- CDCToken // -->
- ColonToken // :
- SemicolonToken // ;
- CommaToken // ,
- LeftBracketToken // [
- RightBracketToken // ]
- LeftParenthesisToken // (
- RightParenthesisToken // )
- LeftBraceToken // {
- RightBraceToken // }
- CommentToken // extra token for comments
- EmptyToken
- CustomPropertyNameToken
- CustomPropertyValueToken
-)
-
-// String returns the string representation of a TokenType.
-func (tt TokenType) String() string {
- switch tt {
- case ErrorToken:
- return "Error"
- case IdentToken:
- return "Ident"
- case FunctionToken:
- return "Function"
- case AtKeywordToken:
- return "AtKeyword"
- case HashToken:
- return "Hash"
- case StringToken:
- return "String"
- case BadStringToken:
- return "BadString"
- case URLToken:
- return "URL"
- case BadURLToken:
- return "BadURL"
- case DelimToken:
- return "Delim"
- case NumberToken:
- return "Number"
- case PercentageToken:
- return "Percentage"
- case DimensionToken:
- return "Dimension"
- case UnicodeRangeToken:
- return "UnicodeRange"
- case IncludeMatchToken:
- return "IncludeMatch"
- case DashMatchToken:
- return "DashMatch"
- case PrefixMatchToken:
- return "PrefixMatch"
- case SuffixMatchToken:
- return "SuffixMatch"
- case SubstringMatchToken:
- return "SubstringMatch"
- case ColumnToken:
- return "Column"
- case WhitespaceToken:
- return "Whitespace"
- case CDOToken:
- return "CDO"
- case CDCToken:
- return "CDC"
- case ColonToken:
- return "Colon"
- case SemicolonToken:
- return "Semicolon"
- case CommaToken:
- return "Comma"
- case LeftBracketToken:
- return "LeftBracket"
- case RightBracketToken:
- return "RightBracket"
- case LeftParenthesisToken:
- return "LeftParenthesis"
- case RightParenthesisToken:
- return "RightParenthesis"
- case LeftBraceToken:
- return "LeftBrace"
- case RightBraceToken:
- return "RightBrace"
- case CommentToken:
- return "Comment"
- case EmptyToken:
- return "Empty"
- case CustomPropertyNameToken:
- return "CustomPropertyName"
- case CustomPropertyValueToken:
- return "CustomPropertyValue"
- }
- return "Invalid(" + strconv.Itoa(int(tt)) + ")"
-}
-
-////////////////////////////////////////////////////////////////
-
-// Lexer is the state for the lexer.
-type Lexer struct {
- r *buffer.Lexer
-}
-
-// NewLexer returns a new Lexer for a given io.Reader.
-func NewLexer(r io.Reader) *Lexer {
- return &Lexer{
- buffer.NewLexer(r),
- }
-}
-
-// Err returns the error encountered during lexing; this is often io.EOF, but other errors can be returned as well.
-func (l *Lexer) Err() error {
- return l.r.Err()
-}
-
-// Restore restores the NULL byte at the end of the buffer.
-func (l *Lexer) Restore() {
- l.r.Restore()
-}
-
-// Next returns the next Token. It returns ErrorToken when an error was encountered. Using Err() one can retrieve the error message.
-func (l *Lexer) Next() (TokenType, []byte) {
- switch l.r.Peek(0) {
- case ' ', '\t', '\n', '\r', '\f':
- l.r.Move(1)
- for l.consumeWhitespace() {
- }
- return WhitespaceToken, l.r.Shift()
- case ':':
- l.r.Move(1)
- return ColonToken, l.r.Shift()
- case ';':
- l.r.Move(1)
- return SemicolonToken, l.r.Shift()
- case ',':
- l.r.Move(1)
- return CommaToken, l.r.Shift()
- case '(', ')', '[', ']', '{', '}':
- if t := l.consumeBracket(); t != ErrorToken {
- return t, l.r.Shift()
- }
- case '#':
- if l.consumeHashToken() {
- return HashToken, l.r.Shift()
- }
- case '"', '\'':
- if t := l.consumeString(); t != ErrorToken {
- return t, l.r.Shift()
- }
- case '.', '+':
- if t := l.consumeNumeric(); t != ErrorToken {
- return t, l.r.Shift()
- }
- case '-':
- if t := l.consumeNumeric(); t != ErrorToken {
- return t, l.r.Shift()
- } else if t := l.consumeIdentlike(); t != ErrorToken {
- return t, l.r.Shift()
- } else if l.consumeCDCToken() {
- return CDCToken, l.r.Shift()
- } else if l.consumeCustomVariableToken() {
- return CustomPropertyNameToken, l.r.Shift()
- }
- case '@':
- if l.consumeAtKeywordToken() {
- return AtKeywordToken, l.r.Shift()
- }
- case '$', '*', '^', '~':
- if t := l.consumeMatch(); t != ErrorToken {
- return t, l.r.Shift()
- }
- case '/':
- if l.consumeComment() {
- return CommentToken, l.r.Shift()
- }
- case '<':
- if l.consumeCDOToken() {
- return CDOToken, l.r.Shift()
- }
- case '\\':
- if t := l.consumeIdentlike(); t != ErrorToken {
- return t, l.r.Shift()
- }
- case 'u', 'U':
- if l.consumeUnicodeRangeToken() {
- return UnicodeRangeToken, l.r.Shift()
- } else if t := l.consumeIdentlike(); t != ErrorToken {
- return t, l.r.Shift()
- }
- case '|':
- if t := l.consumeMatch(); t != ErrorToken {
- return t, l.r.Shift()
- } else if l.consumeColumnToken() {
- return ColumnToken, l.r.Shift()
- }
- case 0:
- if l.Err() != nil {
- return ErrorToken, nil
- }
- default:
- if t := l.consumeNumeric(); t != ErrorToken {
- return t, l.r.Shift()
- } else if t := l.consumeIdentlike(); t != ErrorToken {
- return t, l.r.Shift()
- }
- }
- // can't be rune because consumeIdentlike consumes that as an identifier
- l.r.Move(1)
- return DelimToken, l.r.Shift()
-}
-
-////////////////////////////////////////////////////////////////
-
-/*
-The following functions follow the railroad diagrams in http://www.w3.org/TR/css3-syntax/
-*/
-
-func (l *Lexer) consumeByte(c byte) bool {
- if l.r.Peek(0) == c {
- l.r.Move(1)
- return true
- }
- return false
-}
-
-func (l *Lexer) consumeComment() bool {
- if l.r.Peek(0) != '/' || l.r.Peek(1) != '*' {
- return false
- }
- l.r.Move(2)
- for {
- c := l.r.Peek(0)
- if c == 0 && l.Err() != nil {
- break
- } else if c == '*' && l.r.Peek(1) == '/' {
- l.r.Move(2)
- return true
- }
- l.r.Move(1)
- }
- return true
-}
-
-func (l *Lexer) consumeNewline() bool {
- c := l.r.Peek(0)
- if c == '\n' || c == '\f' {
- l.r.Move(1)
- return true
- } else if c == '\r' {
- if l.r.Peek(1) == '\n' {
- l.r.Move(2)
- } else {
- l.r.Move(1)
- }
- return true
- }
- return false
-}
-
-func (l *Lexer) consumeWhitespace() bool {
- c := l.r.Peek(0)
- if c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' {
- l.r.Move(1)
- return true
- }
- return false
-}
-
-func (l *Lexer) consumeDigit() bool {
- c := l.r.Peek(0)
- if c >= '0' && c <= '9' {
- l.r.Move(1)
- return true
- }
- return false
-}
-
-func (l *Lexer) consumeHexDigit() bool {
- c := l.r.Peek(0)
- if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
- l.r.Move(1)
- return true
- }
- return false
-}
-
-func (l *Lexer) consumeEscape() bool {
- if l.r.Peek(0) != '\\' {
- return false
- }
- mark := l.r.Pos()
- l.r.Move(1)
- if l.consumeNewline() {
- l.r.Rewind(mark)
- return false
- } else if l.consumeHexDigit() {
- for k := 1; k < 6; k++ {
- if !l.consumeHexDigit() {
- break
- }
- }
- l.consumeWhitespace()
- return true
- } else {
- c := l.r.Peek(0)
- if c >= 0xC0 {
- _, n := l.r.PeekRune(0)
- l.r.Move(n)
- return true
- } else if c == 0 && l.r.Err() != nil {
- return true
- }
- }
- l.r.Move(1)
- return true
-}
-
-func (l *Lexer) consumeIdentToken() bool {
- mark := l.r.Pos()
- if l.r.Peek(0) == '-' {
- l.r.Move(1)
- }
- c := l.r.Peek(0)
- if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c >= 0x80) {
- if c != '\\' || !l.consumeEscape() {
- l.r.Rewind(mark)
- return false
- }
- } else {
- l.r.Move(1)
- }
- for {
- c := l.r.Peek(0)
- if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' || c == '-' || c >= 0x80) {
- if c != '\\' || !l.consumeEscape() {
- break
- }
- } else {
- l.r.Move(1)
- }
- }
- return true
-}
-
-// support custom variables, https://www.w3.org/TR/css-variables-1/
-func (l *Lexer) consumeCustomVariableToken() bool {
- // expect to be on a '-'
- l.r.Move(1)
- if l.r.Peek(0) != '-' {
- l.r.Move(-1)
- return false
- }
- if !l.consumeIdentToken() {
- l.r.Move(-1)
- return false
- }
- return true
-}
-
-func (l *Lexer) consumeAtKeywordToken() bool {
- // expect to be on an '@'
- l.r.Move(1)
- if !l.consumeIdentToken() {
- l.r.Move(-1)
- return false
- }
- return true
-}
-
-func (l *Lexer) consumeHashToken() bool {
- // expect to be on a '#'
- mark := l.r.Pos()
- l.r.Move(1)
- c := l.r.Peek(0)
- if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' || c == '-' || c >= 0x80) {
- if c != '\\' || !l.consumeEscape() {
- l.r.Rewind(mark)
- return false
- }
- } else {
- l.r.Move(1)
- }
- for {
- c := l.r.Peek(0)
- if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' || c == '-' || c >= 0x80) {
- if c != '\\' || !l.consumeEscape() {
- break
- }
- } else {
- l.r.Move(1)
- }
- }
- return true
-}
-
-func (l *Lexer) consumeNumberToken() bool {
- mark := l.r.Pos()
- c := l.r.Peek(0)
- if c == '+' || c == '-' {
- l.r.Move(1)
- }
- firstDigit := l.consumeDigit()
- if firstDigit {
- for l.consumeDigit() {
- }
- }
- if l.r.Peek(0) == '.' {
- l.r.Move(1)
- if l.consumeDigit() {
- for l.consumeDigit() {
- }
- } else if firstDigit {
- // . could belong to the next token
- l.r.Move(-1)
- return true
- } else {
- l.r.Rewind(mark)
- return false
- }
- } else if !firstDigit {
- l.r.Rewind(mark)
- return false
- }
- mark = l.r.Pos()
- c = l.r.Peek(0)
- if c == 'e' || c == 'E' {
- l.r.Move(1)
- c = l.r.Peek(0)
- if c == '+' || c == '-' {
- l.r.Move(1)
- }
- if !l.consumeDigit() {
- // e could belong to next token
- l.r.Rewind(mark)
- return true
- }
- for l.consumeDigit() {
- }
- }
- return true
-}
-
-func (l *Lexer) consumeUnicodeRangeToken() bool {
- c := l.r.Peek(0)
- if (c != 'u' && c != 'U') || l.r.Peek(1) != '+' {
- return false
- }
- mark := l.r.Pos()
- l.r.Move(2)
- if l.consumeHexDigit() {
- // consume up to 6 hexDigits
- k := 1
- for ; k < 6; k++ {
- if !l.consumeHexDigit() {
- break
- }
- }
-
- // either a minus or a question mark or the end is expected
- if l.consumeByte('-') {
- // consume another up to 6 hexDigits
- if l.consumeHexDigit() {
- for k := 1; k < 6; k++ {
- if !l.consumeHexDigit() {
- break
- }
- }
- } else {
- l.r.Rewind(mark)
- return false
- }
- } else {
- // could be filled up to 6 characters with question marks or else regular hexDigits
- if l.consumeByte('?') {
- k++
- for ; k < 6; k++ {
- if !l.consumeByte('?') {
- l.r.Rewind(mark)
- return false
- }
- }
- }
- }
- } else {
- // consume 6 question marks
- for k := 0; k < 6; k++ {
- if !l.consumeByte('?') {
- l.r.Rewind(mark)
- return false
- }
- }
- }
- return true
-}
-
-func (l *Lexer) consumeColumnToken() bool {
- if l.r.Peek(0) == '|' && l.r.Peek(1) == '|' {
- l.r.Move(2)
- return true
- }
- return false
-}
-
-func (l *Lexer) consumeCDOToken() bool {
- if l.r.Peek(0) == '<' && l.r.Peek(1) == '!' && l.r.Peek(2) == '-' && l.r.Peek(3) == '-' {
- l.r.Move(4)
- return true
- }
- return false
-}
-
-func (l *Lexer) consumeCDCToken() bool {
- if l.r.Peek(0) == '-' && l.r.Peek(1) == '-' && l.r.Peek(2) == '>' {
- l.r.Move(3)
- return true
- }
- return false
-}
-
-////////////////////////////////////////////////////////////////
-
-// consumeMatch consumes any MatchToken.
-func (l *Lexer) consumeMatch() TokenType {
- if l.r.Peek(1) == '=' {
- switch l.r.Peek(0) {
- case '~':
- l.r.Move(2)
- return IncludeMatchToken
- case '|':
- l.r.Move(2)
- return DashMatchToken
- case '^':
- l.r.Move(2)
- return PrefixMatchToken
- case '$':
- l.r.Move(2)
- return SuffixMatchToken
- case '*':
- l.r.Move(2)
- return SubstringMatchToken
- }
- }
- return ErrorToken
-}
-
-// consumeBracket consumes any bracket token.
-func (l *Lexer) consumeBracket() TokenType {
- switch l.r.Peek(0) {
- case '(':
- l.r.Move(1)
- return LeftParenthesisToken
- case ')':
- l.r.Move(1)
- return RightParenthesisToken
- case '[':
- l.r.Move(1)
- return LeftBracketToken
- case ']':
- l.r.Move(1)
- return RightBracketToken
- case '{':
- l.r.Move(1)
- return LeftBraceToken
- case '}':
- l.r.Move(1)
- return RightBraceToken
- }
- return ErrorToken
-}
-
-// consumeNumeric consumes NumberToken, PercentageToken or DimensionToken.
-func (l *Lexer) consumeNumeric() TokenType {
- if l.consumeNumberToken() {
- if l.consumeByte('%') {
- return PercentageToken
- } else if l.consumeIdentToken() {
- return DimensionToken
- }
- return NumberToken
- }
- return ErrorToken
-}
-
-// consumeString consumes a string and may return BadStringToken when a newline is encountered.
-func (l *Lexer) consumeString() TokenType {
- // assume to be on " or '
- delim := l.r.Peek(0)
- l.r.Move(1)
- for {
- c := l.r.Peek(0)
- if c == 0 && l.Err() != nil {
- break
- } else if c == '\n' || c == '\r' || c == '\f' {
- l.r.Move(1)
- return BadStringToken
- } else if c == delim {
- l.r.Move(1)
- break
- } else if c == '\\' {
- if !l.consumeEscape() {
- l.r.Move(1)
- l.consumeNewline()
- }
- } else {
- l.r.Move(1)
- }
- }
- return StringToken
-}
-
-func (l *Lexer) consumeUnquotedURL() bool {
- for {
- c := l.r.Peek(0)
- if c == 0 && l.Err() != nil || c == ')' {
- break
- } else if c == '"' || c == '\'' || c == '(' || c == '\\' || c == ' ' || c <= 0x1F || c == 0x7F {
- if c != '\\' || !l.consumeEscape() {
- return false
- }
- } else {
- l.r.Move(1)
- }
- }
- return true
-}
-
-// consumeRemnantsBadURL consumes the remaining bytes of a BadURLToken so that normal tokenization may continue.
-func (l *Lexer) consumeRemnantsBadURL() {
- for {
- if l.consumeByte(')') || l.Err() != nil {
- break
- } else if !l.consumeEscape() {
- l.r.Move(1)
- }
- }
-}
-
-// consumeIdentlike consumes IdentToken, FunctionToken or URLToken.
-func (l *Lexer) consumeIdentlike() TokenType {
- if l.consumeIdentToken() {
- if l.r.Peek(0) != '(' {
- return IdentToken
- } else if !parse.EqualFold(bytes.Replace(l.r.Lexeme(), []byte{'\\'}, nil, -1), []byte{'u', 'r', 'l'}) {
- l.r.Move(1)
- return FunctionToken
- }
- l.r.Move(1)
-
- // consume url
- for l.consumeWhitespace() {
- }
- if c := l.r.Peek(0); c == '"' || c == '\'' {
- if l.consumeString() == BadStringToken {
- l.consumeRemnantsBadURL()
- return BadURLToken
- }
- } else if !l.consumeUnquotedURL() && !l.consumeWhitespace() {
- l.consumeRemnantsBadURL()
- return BadURLToken
- }
- for l.consumeWhitespace() {
- }
- if !l.consumeByte(')') && l.Err() != io.EOF {
- l.consumeRemnantsBadURL()
- return BadURLToken
- }
- return URLToken
- }
- return ErrorToken
-}
diff --git a/vendor/github.com/tdewolff/parse/css/lex_test.go b/vendor/github.com/tdewolff/parse/css/lex_test.go
deleted file mode 100644
index 0bdc891..0000000
--- a/vendor/github.com/tdewolff/parse/css/lex_test.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package css // import "github.com/tdewolff/parse/css"
-
-import (
- "bytes"
- "fmt"
- "io"
- "testing"
-
- "github.com/tdewolff/test"
-)
-
-type TTs []TokenType
-
-func TestTokens(t *testing.T) {
- var tokenTests = []struct {
- css string
- expected []TokenType
- }{
- {" ", TTs{}},
- {"5.2 .4", TTs{NumberToken, NumberToken}},
- {"color: red;", TTs{IdentToken, ColonToken, IdentToken, SemicolonToken}},
- {"background: url(\"http://x\");", TTs{IdentToken, ColonToken, URLToken, SemicolonToken}},
- {"background: URL(x.png);", TTs{IdentToken, ColonToken, URLToken, SemicolonToken}},
- {"color: rgb(4, 0%, 5em);", TTs{IdentToken, ColonToken, FunctionToken, NumberToken, CommaToken, PercentageToken, CommaToken, DimensionToken, RightParenthesisToken, SemicolonToken}},
- {"body { \"string\" }", TTs{IdentToken, LeftBraceToken, StringToken, RightBraceToken}},
- {"body { \"str\\\"ing\" }", TTs{IdentToken, LeftBraceToken, StringToken, RightBraceToken}},
- {".class { }", TTs{DelimToken, IdentToken, LeftBraceToken, RightBraceToken}},
- {"#class { }", TTs{HashToken, LeftBraceToken, RightBraceToken}},
- {"#class\\#withhash { }", TTs{HashToken, LeftBraceToken, RightBraceToken}},
- {"@media print { }", TTs{AtKeywordToken, IdentToken, LeftBraceToken, RightBraceToken}},
- {"/*comment*/", TTs{CommentToken}},
- {"/*com* /ment*/", TTs{CommentToken}},
- {"~= |= ^= $= *=", TTs{IncludeMatchToken, DashMatchToken, PrefixMatchToken, SuffixMatchToken, SubstringMatchToken}},
- {"||", TTs{ColumnToken}},
- {"<!-- -->", TTs{CDOToken, CDCToken}},
- {"U+1234", TTs{UnicodeRangeToken}},
- {"5.2 .4 4e-22", TTs{NumberToken, NumberToken, NumberToken}},
- {"--custom-variable", TTs{CustomPropertyNameToken}},
-
- // unexpected ending
- {"ident", TTs{IdentToken}},
- {"123.", TTs{NumberToken, DelimToken}},
- {"\"string", TTs{StringToken}},
- {"123/*comment", TTs{NumberToken, CommentToken}},
- {"U+1-", TTs{IdentToken, NumberToken, DelimToken}},
-
- // unicode
- {"fooδbar􀀀", TTs{IdentToken}},
- {"foo\\æ\\†", TTs{IdentToken}},
- // {"foo\x00bar", TTs{IdentToken}},
- {"'foo\u554abar'", TTs{StringToken}},
- {"\\000026B", TTs{IdentToken}},
- {"\\26 B", TTs{IdentToken}},
-
- // hacks
- {`\-\mo\z\-b\i\nd\in\g:\url(//business\i\nfo.co.uk\/labs\/xbl\/xbl\.xml\#xss);`, TTs{IdentToken, ColonToken, URLToken, SemicolonToken}},
- {"width/**/:/**/ 40em;", TTs{IdentToken, CommentToken, ColonToken, CommentToken, DimensionToken, SemicolonToken}},
- {":root *> #quince", TTs{ColonToken, IdentToken, DelimToken, DelimToken, HashToken}},
- {"html[xmlns*=\"\"]:root", TTs{IdentToken, LeftBracketToken, IdentToken, SubstringMatchToken, StringToken, RightBracketToken, ColonToken, IdentToken}},
- {"body:nth-of-type(1)", TTs{IdentToken, ColonToken, FunctionToken, NumberToken, RightParenthesisToken}},
- {"color/*\\**/: blue\\9;", TTs{IdentToken, CommentToken, ColonToken, IdentToken, SemicolonToken}},
- {"color: blue !ie;", TTs{IdentToken, ColonToken, IdentToken, DelimToken, IdentToken, SemicolonToken}},
-
- // escapes, null and replacement character
- {"c\\\x00olor: white;", TTs{IdentToken, ColonToken, IdentToken, SemicolonToken}},
- {"null\\0", TTs{IdentToken}},
- {"eof\\", TTs{IdentToken}},
- {"\"a\x00b\"", TTs{StringToken}},
- {"a\\\x00b", TTs{IdentToken}},
- {"url(a\x00b)", TTs{BadURLToken}}, // null character cannot be unquoted
- {"/*a\x00b*/", TTs{CommentToken}},
-
- // coverage
- {" \n\r\n\r\"\\\r\n\\\r\"", TTs{StringToken}},
- {"U+?????? U+ABCD?? U+ABC-DEF", TTs{UnicodeRangeToken, UnicodeRangeToken, UnicodeRangeToken}},
- {"U+? U+A?", TTs{IdentToken, DelimToken, DelimToken, IdentToken, DelimToken, IdentToken, DelimToken}},
- {"-5.23 -moz", TTs{NumberToken, IdentToken}},
- {"()", TTs{LeftParenthesisToken, RightParenthesisToken}},
- {"url( //url )", TTs{URLToken}},
- {"url( ", TTs{URLToken}},
- {"url( //url", TTs{URLToken}},
- {"url(\")a", TTs{URLToken}},
- {"url(a'\\\n)a", TTs{BadURLToken, IdentToken}},
- {"url(\"\n)a", TTs{BadURLToken, IdentToken}},
- {"url(a h)a", TTs{BadURLToken, IdentToken}},
- {"<!- | @4 ## /2", TTs{DelimToken, DelimToken, DelimToken, DelimToken, DelimToken, NumberToken, DelimToken, DelimToken, DelimToken, NumberToken}},
- {"\"s\\\n\"", TTs{StringToken}},
- {"\"a\\\"b\"", TTs{StringToken}},
- {"\"s\n", TTs{BadStringToken}},
-
- // small
- {"\"abcd", TTs{StringToken}},
- {"/*comment", TTs{CommentToken}},
- {"U+A-B", TTs{UnicodeRangeToken}},
- {"url((", TTs{BadURLToken}},
- {"id\u554a", TTs{IdentToken}},
- }
- for _, tt := range tokenTests {
- t.Run(tt.css, func(t *testing.T) {
- l := NewLexer(bytes.NewBufferString(tt.css))
- i := 0
- for {
- token, _ := l.Next()
- if token == ErrorToken {
- test.T(t, l.Err(), io.EOF)
- test.T(t, i, len(tt.expected), "when error occurred we must be at the end")
- break
- } else if token == WhitespaceToken {
- continue
- }
- test.That(t, i < len(tt.expected), "index", i, "must not exceed expected token types size", len(tt.expected))
- if i < len(tt.expected) {
- test.T(t, token, tt.expected[i], "token types must match")
- }
- i++
- }
- })
- }
-
- test.T(t, WhitespaceToken.String(), "Whitespace")
- test.T(t, EmptyToken.String(), "Empty")
- test.T(t, CustomPropertyValueToken.String(), "CustomPropertyValue")
- test.T(t, TokenType(100).String(), "Invalid(100)")
- test.T(t, NewLexer(bytes.NewBufferString("x")).consumeBracket(), ErrorToken, "consumeBracket on 'x' must return error")
-}
-
-////////////////////////////////////////////////////////////////
-
-func ExampleNewLexer() {
- l := NewLexer(bytes.NewBufferString("color: red;"))
- out := ""
- for {
- tt, data := l.Next()
- if tt == ErrorToken {
- break
- } else if tt == WhitespaceToken || tt == CommentToken {
- continue
- }
- out += string(data)
- }
- fmt.Println(out)
- // Output: color:red;
-}
diff --git a/vendor/github.com/tdewolff/parse/css/parse.go b/vendor/github.com/tdewolff/parse/css/parse.go
deleted file mode 100644
index cedd237..0000000
--- a/vendor/github.com/tdewolff/parse/css/parse.go
+++ /dev/null
@@ -1,402 +0,0 @@
-package css // import "github.com/tdewolff/parse/css"
-
-import (
- "bytes"
- "io"
- "strconv"
-
- "github.com/tdewolff/parse"
-)
-
-var wsBytes = []byte(" ")
-var endBytes = []byte("}")
-var emptyBytes = []byte("")
-
-// GrammarType determines the type of grammar.
-type GrammarType uint32
-
-// GrammarType values.
-const (
- ErrorGrammar GrammarType = iota // extra token when errors occur
- CommentGrammar
- AtRuleGrammar
- BeginAtRuleGrammar
- EndAtRuleGrammar
- QualifiedRuleGrammar
- BeginRulesetGrammar
- EndRulesetGrammar
- DeclarationGrammar
- TokenGrammar
- CustomPropertyGrammar
-)
-
-// String returns the string representation of a GrammarType.
-func (tt GrammarType) String() string {
- switch tt {
- case ErrorGrammar:
- return "Error"
- case CommentGrammar:
- return "Comment"
- case AtRuleGrammar:
- return "AtRule"
- case BeginAtRuleGrammar:
- return "BeginAtRule"
- case EndAtRuleGrammar:
- return "EndAtRule"
- case QualifiedRuleGrammar:
- return "QualifiedRule"
- case BeginRulesetGrammar:
- return "BeginRuleset"
- case EndRulesetGrammar:
- return "EndRuleset"
- case DeclarationGrammar:
- return "Declaration"
- case TokenGrammar:
- return "Token"
- case CustomPropertyGrammar:
- return "CustomProperty"
- }
- return "Invalid(" + strconv.Itoa(int(tt)) + ")"
-}
-
-////////////////////////////////////////////////////////////////
-
-// State is the state function the parser currently is in.
-type State func(*Parser) GrammarType
-
-// Token is a single TokenType and its associated data.
-type Token struct {
- TokenType
- Data []byte
-}
-
-func (t Token) String() string {
- return t.TokenType.String() + "('" + string(t.Data) + "')"
-}
-
-// Parser is the state for the parser.
-type Parser struct {
- l *Lexer
- state []State
- err error
-
- buf []Token
- level int
-
- tt TokenType
- data []byte
- prevWS bool
- prevEnd bool
-}
-
-// NewParser returns a new CSS parser from an io.Reader. isInline specifies whether this is an inline style attribute.
-func NewParser(r io.Reader, isInline bool) *Parser {
- l := NewLexer(r)
- p := &Parser{
- l: l,
- state: make([]State, 0, 4),
- }
-
- if isInline {
- p.state = append(p.state, (*Parser).parseDeclarationList)
- } else {
- p.state = append(p.state, (*Parser).parseStylesheet)
- }
- return p
-}
-
-// Err returns the error encountered during parsing; this is often io.EOF, but other errors can be returned as well.
-func (p *Parser) Err() error {
- if p.err != nil {
- return p.err
- }
- return p.l.Err()
-}
-
-// Restore restores the NULL byte at the end of the buffer.
-func (p *Parser) Restore() {
- p.l.Restore()
-}
-
-// Next returns the next Grammar. It returns ErrorGrammar when an error was encountered. Using Err() one can retrieve the error message.
-func (p *Parser) Next() (GrammarType, TokenType, []byte) {
- p.err = nil
-
- if p.prevEnd {
- p.tt, p.data = RightBraceToken, endBytes
- p.prevEnd = false
- } else {
- p.tt, p.data = p.popToken(true)
- }
- gt := p.state[len(p.state)-1](p)
- return gt, p.tt, p.data
-}
-
-// Values returns a slice of Tokens for the last Grammar. Only AtRuleGrammar, BeginAtRuleGrammar, BeginRulesetGrammar and DeclarationGrammar return values: the at-rule components, the ruleset selector and the declaration value respectively.
-func (p *Parser) Values() []Token {
- return p.buf
-}
-
-func (p *Parser) popToken(allowComment bool) (TokenType, []byte) {
- p.prevWS = false
- tt, data := p.l.Next()
- for tt == WhitespaceToken || tt == CommentToken {
- if tt == WhitespaceToken {
- p.prevWS = true
- } else if allowComment && len(p.state) == 1 {
- break
- }
- tt, data = p.l.Next()
- }
- return tt, data
-}
-
-func (p *Parser) initBuf() {
- p.buf = p.buf[:0]
-}
-
-func (p *Parser) pushBuf(tt TokenType, data []byte) {
- p.buf = append(p.buf, Token{tt, data})
-}
-
-////////////////////////////////////////////////////////////////
-
-func (p *Parser) parseStylesheet() GrammarType {
- if p.tt == CDOToken || p.tt == CDCToken {
- return TokenGrammar
- } else if p.tt == AtKeywordToken {
- return p.parseAtRule()
- } else if p.tt == CommentToken {
- return CommentGrammar
- } else if p.tt == ErrorToken {
- return ErrorGrammar
- }
- return p.parseQualifiedRule()
-}
-
-func (p *Parser) parseDeclarationList() GrammarType {
- if p.tt == CommentToken {
- p.tt, p.data = p.popToken(false)
- }
- for p.tt == SemicolonToken {
- p.tt, p.data = p.popToken(false)
- }
- if p.tt == ErrorToken {
- return ErrorGrammar
- } else if p.tt == AtKeywordToken {
- return p.parseAtRule()
- } else if p.tt == IdentToken {
- return p.parseDeclaration()
- } else if p.tt == CustomPropertyNameToken {
- return p.parseCustomProperty()
- }
-
- // parse error
- p.initBuf()
- p.err = parse.NewErrorLexer("unexpected token in declaration", p.l.r)
- for {
- tt, data := p.popToken(false)
- if (tt == SemicolonToken || tt == RightBraceToken) && p.level == 0 || tt == ErrorToken {
- p.prevEnd = (tt == RightBraceToken)
- return ErrorGrammar
- }
- p.pushBuf(tt, data)
- }
-}
-
-////////////////////////////////////////////////////////////////
-
-func (p *Parser) parseAtRule() GrammarType {
- p.initBuf()
- parse.ToLower(p.data)
- atRuleName := p.data
-	if len(atRuleName) > 1 && atRuleName[1] == '-' { // need at least "@-" before testing for a vendor prefix
- if i := bytes.IndexByte(atRuleName[2:], '-'); i != -1 {
- atRuleName = atRuleName[i+2:] // skip vendor specific prefix
- }
- }
- atRule := ToHash(atRuleName[1:])
-
- first := true
- skipWS := false
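-	// collect the at-rule prelude into p.buf until a top-level '{' or ';' (or an unmatched '}' / end of input) ends it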
- for {
- tt, data := p.popToken(false)
- if tt == LeftBraceToken && p.level == 0 {
- if atRule == Font_Face || atRule == Page {
- p.state = append(p.state, (*Parser).parseAtRuleDeclarationList)
- } else if atRule == Document || atRule == Keyframes || atRule == Media || atRule == Supports {
- p.state = append(p.state, (*Parser).parseAtRuleRuleList)
- } else {
- p.state = append(p.state, (*Parser).parseAtRuleUnknown)
- }
- return BeginAtRuleGrammar
- } else if (tt == SemicolonToken || tt == RightBraceToken) && p.level == 0 || tt == ErrorToken {
- p.prevEnd = (tt == RightBraceToken)
- return AtRuleGrammar
- } else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
- p.level++
- } else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
- p.level--
- }
- if first {
- if tt == LeftParenthesisToken || tt == LeftBracketToken {
- p.prevWS = false
- }
- first = false
- }
- if len(data) == 1 && (data[0] == ',' || data[0] == ':') {
- skipWS = true
- } else if p.prevWS && !skipWS && tt != RightParenthesisToken {
- p.pushBuf(WhitespaceToken, wsBytes)
- } else {
- skipWS = false
- }
- if tt == LeftParenthesisToken {
- skipWS = true
- }
- p.pushBuf(tt, data)
- }
-}
-
-func (p *Parser) parseAtRuleRuleList() GrammarType {
- if p.tt == RightBraceToken || p.tt == ErrorToken {
- p.state = p.state[:len(p.state)-1]
- return EndAtRuleGrammar
- } else if p.tt == AtKeywordToken {
- return p.parseAtRule()
- } else {
- return p.parseQualifiedRule()
- }
-}
-
-func (p *Parser) parseAtRuleDeclarationList() GrammarType {
- for p.tt == SemicolonToken {
- p.tt, p.data = p.popToken(false)
- }
- if p.tt == RightBraceToken || p.tt == ErrorToken {
- p.state = p.state[:len(p.state)-1]
- return EndAtRuleGrammar
- }
- return p.parseDeclarationList()
-}
-
-func (p *Parser) parseAtRuleUnknown() GrammarType {
- if p.tt == RightBraceToken && p.level == 0 || p.tt == ErrorToken {
- p.state = p.state[:len(p.state)-1]
- return EndAtRuleGrammar
- }
- if p.tt == LeftParenthesisToken || p.tt == LeftBraceToken || p.tt == LeftBracketToken || p.tt == FunctionToken {
- p.level++
- } else if p.tt == RightParenthesisToken || p.tt == RightBraceToken || p.tt == RightBracketToken {
- p.level--
- }
- return TokenGrammar
-}
-
-func (p *Parser) parseQualifiedRule() GrammarType {
- p.initBuf()
- first := true
- inAttrSel := false
- skipWS := true
- var tt TokenType
- var data []byte
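-	// collect the selector into p.buf until '{' begins the ruleset; ',' ends the current selector as a QualifiedRuleGrammar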
- for {
- if first {
- tt, data = p.tt, p.data
- p.tt = WhitespaceToken
- p.data = emptyBytes
- first = false
- } else {
- tt, data = p.popToken(false)
- }
- if tt == LeftBraceToken && p.level == 0 {
- p.state = append(p.state, (*Parser).parseQualifiedRuleDeclarationList)
- return BeginRulesetGrammar
- } else if tt == ErrorToken {
- p.err = parse.NewErrorLexer("unexpected ending in qualified rule, expected left brace token", p.l.r)
- return ErrorGrammar
- } else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
- p.level++
- } else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
- p.level--
- }
- if len(data) == 1 && (data[0] == ',' || data[0] == '>' || data[0] == '+' || data[0] == '~') {
- if data[0] == ',' {
- return QualifiedRuleGrammar
- }
- skipWS = true
- } else if p.prevWS && !skipWS && !inAttrSel {
- p.pushBuf(WhitespaceToken, wsBytes)
- } else {
- skipWS = false
- }
- if tt == LeftBracketToken {
- inAttrSel = true
- } else if tt == RightBracketToken {
- inAttrSel = false
- }
- p.pushBuf(tt, data)
- }
-}
-
-func (p *Parser) parseQualifiedRuleDeclarationList() GrammarType {
- for p.tt == SemicolonToken {
- p.tt, p.data = p.popToken(false)
- }
- if p.tt == RightBraceToken || p.tt == ErrorToken {
- p.state = p.state[:len(p.state)-1]
- return EndRulesetGrammar
- }
- return p.parseDeclarationList()
-}
-
-func (p *Parser) parseDeclaration() GrammarType {
- p.initBuf()
- parse.ToLower(p.data)
- if tt, _ := p.popToken(false); tt != ColonToken {
- p.err = parse.NewErrorLexer("unexpected token in declaration", p.l.r)
- return ErrorGrammar
- }
- skipWS := true
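-	// collect the declaration value into p.buf until a top-level ';' or '}' (or end of input) ends it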
- for {
- tt, data := p.popToken(false)
- if (tt == SemicolonToken || tt == RightBraceToken) && p.level == 0 || tt == ErrorToken {
- p.prevEnd = (tt == RightBraceToken)
- return DeclarationGrammar
- } else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
- p.level++
- } else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
- p.level--
- }
- if len(data) == 1 && (data[0] == ',' || data[0] == '/' || data[0] == ':' || data[0] == '!' || data[0] == '=') {
- skipWS = true
- } else if p.prevWS && !skipWS {
- p.pushBuf(WhitespaceToken, wsBytes)
- } else {
- skipWS = false
- }
- p.pushBuf(tt, data)
- }
-}
-
-func (p *Parser) parseCustomProperty() GrammarType {
- p.initBuf()
- if tt, _ := p.popToken(false); tt != ColonToken {
- p.err = parse.NewErrorLexer("unexpected token in declaration", p.l.r)
- return ErrorGrammar
- }
- val := []byte{}
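-	// read the custom property value verbatim (whitespace and comments included) until a top-level ';' or '}'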
- for {
- tt, data := p.l.Next()
- if (tt == SemicolonToken || tt == RightBraceToken) && p.level == 0 || tt == ErrorToken {
- p.prevEnd = (tt == RightBraceToken)
- p.pushBuf(CustomPropertyValueToken, val)
- return CustomPropertyGrammar
- } else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
- p.level++
- } else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
- p.level--
- }
- val = append(val, data...)
- }
-}
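For reference, a minimal sketch of how the custom-property grammar above surfaces to a caller: `parseCustomProperty` keeps the raw value (whitespace included) and hands it back as a single `CustomPropertyValueToken` through `Values()`. The input string here is only an illustration, not taken from the tests.

``` go
package main

import (
	"bytes"
	"fmt"

	"github.com/tdewolff/parse/css"
)

func main() {
	// "--main-color: black;" is an arbitrary example input.
	p := css.NewParser(bytes.NewBufferString("--main-color: black;"), true)
	for {
		gt, _, data := p.Next()
		if gt == css.ErrorGrammar {
			break // io.EOF or a parse error, see p.Err()
		}
		if gt == css.CustomPropertyGrammar {
			// data holds the property name; Values() holds one CustomPropertyValueToken
			// with the raw value, leading whitespace included.
			for _, val := range p.Values() {
				fmt.Printf("%s =%s\n", data, val.Data)
			}
		}
	}
}
```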
diff --git a/vendor/github.com/tdewolff/parse/css/parse_test.go b/vendor/github.com/tdewolff/parse/css/parse_test.go
deleted file mode 100644
index 9871854..0000000
--- a/vendor/github.com/tdewolff/parse/css/parse_test.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package css // import "github.com/tdewolff/parse/css"
-
-import (
- "bytes"
- "fmt"
- "io"
- "testing"
-
- "github.com/tdewolff/parse"
- "github.com/tdewolff/test"
-)
-
-////////////////////////////////////////////////////////////////
-
-func TestParse(t *testing.T) {
- var parseTests = []struct {
- inline bool
- css string
- expected string
- }{
- {true, " x : y ; ", "x:y;"},
- {true, "color: red;", "color:red;"},
- {true, "color : red;", "color:red;"},
- {true, "color: red; border: 0;", "color:red;border:0;"},
- {true, "color: red !important;", "color:red!important;"},
- {true, "color: red ! important;", "color:red!important;"},
- {true, "white-space: -moz-pre-wrap;", "white-space:-moz-pre-wrap;"},
- {true, "display: -moz-inline-stack;", "display:-moz-inline-stack;"},
- {true, "x: 10px / 1em;", "x:10px/1em;"},
- {true, "x: 1em/1.5em \"Times New Roman\", Times, serif;", "x:1em/1.5em \"Times New Roman\",Times,serif;"},
- {true, "x: hsla(100,50%, 75%, 0.5);", "x:hsla(100,50%,75%,0.5);"},
- {true, "x: hsl(100,50%, 75%);", "x:hsl(100,50%,75%);"},
- {true, "x: rgba(255, 238 , 221, 0.3);", "x:rgba(255,238,221,0.3);"},
- {true, "x: 50vmax;", "x:50vmax;"},
- {true, "color: linear-gradient(to right, black, white);", "color:linear-gradient(to right,black,white);"},
- {true, "color: calc(100%/2 - 1em);", "color:calc(100%/2 - 1em);"},
- {true, "color: calc(100%/2--1em);", "color:calc(100%/2--1em);"},
- {false, "<!-- @charset; -->", "<!--@charset;-->"},
- {false, "@media print, screen { }", "@media print,screen{}"},
- {false, "@media { @viewport ; }", "@media{@viewport;}"},
- {false, "@keyframes 'diagonal-slide' { from { left: 0; top: 0; } to { left: 100px; top: 100px; } }", "@keyframes 'diagonal-slide'{from{left:0;top:0;}to{left:100px;top:100px;}}"},
- {false, "@keyframes movingbox{0%{left:90%;}50%{left:10%;}100%{left:90%;}}", "@keyframes movingbox{0%{left:90%;}50%{left:10%;}100%{left:90%;}}"},
- {false, ".foo { color: #fff;}", ".foo{color:#fff;}"},
- {false, ".foo { ; _color: #fff;}", ".foo{_color:#fff;}"},
- {false, "a { color: red; border: 0; }", "a{color:red;border:0;}"},
- {false, "a { color: red; border: 0; } b { padding: 0; }", "a{color:red;border:0;}b{padding:0;}"},
- {false, "/* comment */", "/* comment */"},
-
- // extraordinary
- {true, "color: red;;", "color:red;"},
- {true, "color:#c0c0c0", "color:#c0c0c0;"},
- {true, "background:URL(x.png);", "background:URL(x.png);"},
- {true, "filter: progid : DXImageTransform.Microsoft.BasicImage(rotation=1);", "filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);"},
- {true, "/*a*/\n/*c*/\nkey: value;", "key:value;"},
- {true, "@-moz-charset;", "@-moz-charset;"},
- {true, "--custom-variable: (0;) ;", "--custom-variable: (0;) ;"},
- {false, "@import;@import;", "@import;@import;"},
- {false, ".a .b#c, .d<.e { x:y; }", ".a .b#c,.d<.e{x:y;}"},
- {false, ".a[b~=c]d { x:y; }", ".a[b~=c]d{x:y;}"},
- // {false, "{x:y;}", "{x:y;}"},
- {false, "a{}", "a{}"},
- {false, "a,.b/*comment*/ {x:y;}", "a,.b{x:y;}"},
- {false, "a,.b/*comment*/.c {x:y;}", "a,.b.c{x:y;}"},
- {false, "a{x:; z:q;}", "a{x:;z:q;}"},
- {false, "@font-face { x:y; }", "@font-face{x:y;}"},
- {false, "a:not([controls]){x:y;}", "a:not([controls]){x:y;}"},
- {false, "@document regexp('https:.*') { p { color: red; } }", "@document regexp('https:.*'){p{color:red;}}"},
- {false, "@media all and ( max-width:400px ) { }", "@media all and (max-width:400px){}"},
- {false, "@media (max-width:400px) { }", "@media(max-width:400px){}"},
- {false, "@media (max-width:400px)", "@media(max-width:400px);"},
- {false, "@font-face { ; font:x; }", "@font-face{font:x;}"},
- {false, "@-moz-font-face { ; font:x; }", "@-moz-font-face{font:x;}"},
- {false, "@unknown abc { {} lala }", "@unknown abc{{}lala}"},
- {false, "a[x={}]{x:y;}", "a[x={}]{x:y;}"},
- {false, "a[x=,]{x:y;}", "a[x=,]{x:y;}"},
- {false, "a[x=+]{x:y;}", "a[x=+]{x:y;}"},
- {false, ".cla .ss > #id { x:y; }", ".cla .ss>#id{x:y;}"},
- {false, ".cla /*a*/ /*b*/ .ss{}", ".cla .ss{}"},
- {false, "a{x:f(a(),b);}", "a{x:f(a(),b);}"},
- {false, "a{x:y!z;}", "a{x:y!z;}"},
- {false, "[class*=\"column\"]+[class*=\"column\"]:last-child{a:b;}", "[class*=\"column\"]+[class*=\"column\"]:last-child{a:b;}"},
- {false, "@media { @viewport }", "@media{@viewport;}"},
- {false, "table { @unknown }", "table{@unknown;}"},
-
- // early endings
- {false, "selector{", "selector{"},
- {false, "@media{selector{", "@media{selector{"},
-
- // bad grammar
- {true, "~color:red", "~color:red;"},
- {false, ".foo { *color: #fff;}", ".foo{*color:#fff;}"},
- {true, "*color: red; font-size: 12pt;", "*color:red;font-size:12pt;"},
- {true, "_color: red; font-size: 12pt;", "_color:red;font-size:12pt;"},
-
- // issues
- {false, "@media print {.class{width:5px;}}", "@media print{.class{width:5px;}}"}, // #6
- {false, ".class{width:calc((50% + 2em)/2 + 14px);}", ".class{width:calc((50% + 2em)/2 + 14px);}"}, // #7
- {false, ".class [c=y]{}", ".class [c=y]{}"}, // tdewolff/minify#16
- {false, "table{font-family:Verdana}", "table{font-family:Verdana;}"}, // tdewolff/minify#22
-
- // go-fuzz
- {false, "@-webkit-", "@-webkit-;"},
- }
- for _, tt := range parseTests {
- t.Run(tt.css, func(t *testing.T) {
- output := ""
- p := NewParser(bytes.NewBufferString(tt.css), tt.inline)
- for {
- grammar, _, data := p.Next()
- data = parse.Copy(data)
- if grammar == ErrorGrammar {
- if err := p.Err(); err != io.EOF {
- for _, val := range p.Values() {
- data = append(data, val.Data...)
- }
- if perr, ok := err.(*parse.Error); ok && perr.Message == "unexpected token in declaration" {
- data = append(data, ";"...)
- }
- } else {
- test.T(t, err, io.EOF)
- break
- }
- } else if grammar == AtRuleGrammar || grammar == BeginAtRuleGrammar || grammar == QualifiedRuleGrammar || grammar == BeginRulesetGrammar || grammar == DeclarationGrammar || grammar == CustomPropertyGrammar {
- if grammar == DeclarationGrammar || grammar == CustomPropertyGrammar {
- data = append(data, ":"...)
- }
- for _, val := range p.Values() {
- data = append(data, val.Data...)
- }
- if grammar == BeginAtRuleGrammar || grammar == BeginRulesetGrammar {
- data = append(data, "{"...)
- } else if grammar == AtRuleGrammar || grammar == DeclarationGrammar || grammar == CustomPropertyGrammar {
- data = append(data, ";"...)
- } else if grammar == QualifiedRuleGrammar {
- data = append(data, ","...)
- }
- }
- output += string(data)
- }
- test.String(t, output, tt.expected)
- })
- }
-
- test.T(t, ErrorGrammar.String(), "Error")
- test.T(t, AtRuleGrammar.String(), "AtRule")
- test.T(t, BeginAtRuleGrammar.String(), "BeginAtRule")
- test.T(t, EndAtRuleGrammar.String(), "EndAtRule")
- test.T(t, BeginRulesetGrammar.String(), "BeginRuleset")
- test.T(t, EndRulesetGrammar.String(), "EndRuleset")
- test.T(t, DeclarationGrammar.String(), "Declaration")
- test.T(t, TokenGrammar.String(), "Token")
- test.T(t, CommentGrammar.String(), "Comment")
- test.T(t, CustomPropertyGrammar.String(), "CustomProperty")
- test.T(t, GrammarType(100).String(), "Invalid(100)")
-}
-
-func TestParseError(t *testing.T) {
- var parseErrorTests = []struct {
- inline bool
- css string
- col int
- }{
- {false, "selector", 9},
- {true, "color 0", 8},
- {true, "--color 0", 10},
- {true, "--custom-variable:0", 0},
- }
- for _, tt := range parseErrorTests {
- t.Run(tt.css, func(t *testing.T) {
- p := NewParser(bytes.NewBufferString(tt.css), tt.inline)
- for {
- grammar, _, _ := p.Next()
- if grammar == ErrorGrammar {
- if tt.col == 0 {
- test.T(t, p.Err(), io.EOF)
- } else if perr, ok := p.Err().(*parse.Error); ok {
- _, col, _ := perr.Position()
- test.T(t, col, tt.col)
- } else {
- test.Fail(t, "bad error:", p.Err())
- }
- break
- }
- }
- })
- }
-}
-
-func TestReader(t *testing.T) {
- input := "x:a;"
- p := NewParser(test.NewPlainReader(bytes.NewBufferString(input)), true)
- for {
- grammar, _, _ := p.Next()
- if grammar == ErrorGrammar {
- break
- }
- }
-}
-
-////////////////////////////////////////////////////////////////
-
-type Obj struct{}
-
-func (*Obj) F() {}
-
-var f1 func(*Obj)
-
-func BenchmarkFuncPtr(b *testing.B) {
- for i := 0; i < b.N; i++ {
- f1 = (*Obj).F
- }
-}
-
-var f2 func()
-
-func BenchmarkMemFuncPtr(b *testing.B) {
- obj := &Obj{}
- for i := 0; i < b.N; i++ {
- f2 = obj.F
- }
-}
-
-func ExampleNewParser() {
-	p := NewParser(bytes.NewBufferString("color: red;"), true) // true because this is the content of an inline style attribute
- out := ""
- for {
- gt, _, data := p.Next()
- if gt == ErrorGrammar {
- break
- } else if gt == AtRuleGrammar || gt == BeginAtRuleGrammar || gt == BeginRulesetGrammar || gt == DeclarationGrammar {
- out += string(data)
- if gt == DeclarationGrammar {
- out += ":"
- }
- for _, val := range p.Values() {
- out += string(val.Data)
- }
- if gt == BeginAtRuleGrammar || gt == BeginRulesetGrammar {
- out += "{"
- } else if gt == AtRuleGrammar || gt == DeclarationGrammar {
- out += ";"
- }
- } else {
- out += string(data)
- }
- }
- fmt.Println(out)
- // Output: color:red;
-}
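A minimal sketch of the error-reporting pattern used in `TestParseError` above: when `Next` returns `ErrorGrammar` and `Err()` is not `io.EOF`, the error may be a `*parse.Error` whose `Position()` carries the offending column. The tests only unpack the column value; treating the other return values as line and context is an assumption here.

``` go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/tdewolff/parse"
	"github.com/tdewolff/parse/css"
)

func main() {
	// "color 0" (missing ':') is the same malformed input the test uses.
	p := css.NewParser(bytes.NewBufferString("color 0"), true)
	for {
		gt, _, _ := p.Next()
		if gt != css.ErrorGrammar {
			continue
		}
		if err := p.Err(); err != io.EOF {
			if perr, ok := err.(*parse.Error); ok {
				_, col, _ := perr.Position() // the tests rely only on the column value
				fmt.Println("parse error at column", col, "-", perr.Message)
			}
		}
		return
	}
}
```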
diff --git a/vendor/github.com/tdewolff/parse/css/util.go b/vendor/github.com/tdewolff/parse/css/util.go
deleted file mode 100644
index 676dee8..0000000
--- a/vendor/github.com/tdewolff/parse/css/util.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package css // import "github.com/tdewolff/parse/css"
-
-import "github.com/tdewolff/parse/buffer"
-
-// IsIdent returns true if the bytes are a valid identifier.
-func IsIdent(b []byte) bool {
- l := NewLexer(buffer.NewReader(b))
- l.consumeIdentToken()
- l.r.Restore()
- return l.r.Pos() == len(b)
-}
-
-// IsURLUnquoted returns true if the bytes are a valid unquoted URL.
-func IsURLUnquoted(b []byte) bool {
- l := NewLexer(buffer.NewReader(b))
- l.consumeUnquotedURL()
- l.r.Restore()
- return l.r.Pos() == len(b)
-}
-
-// HSL2RGB converts HSL to RGB, with all values in the range [0,1],
-// following http://www.w3.org/TR/css3-color/#hsl-color.
-func HSL2RGB(h, s, l float64) (float64, float64, float64) {
- m2 := l * (s + 1)
- if l > 0.5 {
- m2 = l + s - l*s
- }
- m1 := l*2 - m2
- return hue2rgb(m1, m2, h+1.0/3.0), hue2rgb(m1, m2, h), hue2rgb(m1, m2, h-1.0/3.0)
-}
-
-func hue2rgb(m1, m2, h float64) float64 {
- if h < 0.0 {
- h += 1.0
- }
- if h > 1.0 {
- h -= 1.0
- }
- if h*6.0 < 1.0 {
- return m1 + (m2-m1)*h*6.0
- } else if h*2.0 < 1.0 {
- return m2
- } else if h*3.0 < 2.0 {
- return m1 + (m2-m1)*(2.0/3.0-h)*6.0
- }
- return m1
-}
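A short usage sketch of the helpers above, with the expected results taken from the tests that follow: `IsIdent` and `IsURLUnquoted` run the lexer over the whole input, and `HSL2RGB` follows the CSS3 color formula.

``` go
package main

import (
	"fmt"

	"github.com/tdewolff/parse/css"
)

func main() {
	fmt.Println(css.IsIdent([]byte("color")))   // true: "color" is a valid identifier
	fmt.Println(css.IsURLUnquoted([]byte(")"))) // false: ')' cannot appear in an unquoted URL
	r, g, b := css.HSL2RGB(0.0, 1.0, 0.5)       // hue 0, full saturation, half lightness
	fmt.Println(r, g, b)                        // 1 0 0 (pure red), per TestHsl2Rgb below
}
```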
diff --git a/vendor/github.com/tdewolff/parse/css/util_test.go b/vendor/github.com/tdewolff/parse/css/util_test.go
deleted file mode 100644
index 9eb5aa9..0000000
--- a/vendor/github.com/tdewolff/parse/css/util_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package css // import "github.com/tdewolff/parse/css"
-
-import (
- "testing"
-
- "github.com/tdewolff/test"
-)
-
-func TestIsIdent(t *testing.T) {
- test.That(t, IsIdent([]byte("color")))
- test.That(t, !IsIdent([]byte("4.5")))
-}
-
-func TestIsURLUnquoted(t *testing.T) {
- test.That(t, IsURLUnquoted([]byte("http://x")))
- test.That(t, !IsURLUnquoted([]byte(")")))
-}
-
-func TestHsl2Rgb(t *testing.T) {
- r, g, b := HSL2RGB(0.0, 1.0, 0.5)
- test.T(t, r, 1.0)
- test.T(t, g, 0.0)
- test.T(t, b, 0.0)
-
- r, g, b = HSL2RGB(1.0, 1.0, 0.5)
- test.T(t, r, 1.0)
- test.T(t, g, 0.0)
- test.T(t, b, 0.0)
-
- r, g, b = HSL2RGB(0.66, 0.0, 1.0)
- test.T(t, r, 1.0)
- test.T(t, g, 1.0)
- test.T(t, b, 1.0)
-}