A Glicko-2-based round-robin ranking system designed to test C++ Battleship submissions.
battleship.dunkirk.sh
1// Package shlex provides a simple lexical analysis like Unix shell.
2package shlex
3
4import (
5 "bufio"
6 "errors"
7 "io"
8 "strings"
9 "unicode"
10)
11
// Sentinel errors returned by the Lexer when input ends in the middle
// of a quoted or escaped sequence.
var (
	// ErrNoClosing is returned when a quoted section is never closed.
	ErrNoClosing = errors.New("No closing quotation")
	// ErrNoEscaped is returned when the input ends immediately after
	// an escape character.
	ErrNoEscaped = errors.New("No escaped character")
)
16
// Tokenizer is the interface that classifies a token according to
// words, whitespaces, quotations, escapes and escaped quotations.
type Tokenizer interface {
	// IsWord reports whether the rune may appear in an unquoted word.
	IsWord(rune) bool
	// IsWhitespace reports whether the rune separates tokens.
	IsWhitespace(rune) bool
	// IsQuote reports whether the rune opens or closes a quoted section.
	IsQuote(rune) bool
	// IsEscape reports whether the rune escapes the following rune.
	IsEscape(rune) bool
	// IsEscapedQuote reports whether the rune is a quote inside which
	// escape characters are honored.
	IsEscapedQuote(rune) bool
}
26
// DefaultTokenizer implements a simple tokenizer like Unix shell.
type DefaultTokenizer struct{}

// IsWord reports whether r may appear in an unquoted word:
// letters, digits and the underscore.
func (t *DefaultTokenizer) IsWord(r rune) bool {
	if r == '_' {
		return true
	}
	return unicode.IsLetter(r) || unicode.IsNumber(r)
}

// IsQuote reports whether r opens or closes a quoted section.
func (t *DefaultTokenizer) IsQuote(r rune) bool {
	return r == '\'' || r == '"'
}

// IsWhitespace reports whether r separates tokens.
func (t *DefaultTokenizer) IsWhitespace(r rune) bool {
	return unicode.IsSpace(r)
}

// IsEscape reports whether r escapes the following rune.
func (t *DefaultTokenizer) IsEscape(r rune) bool {
	return r == '\\'
}

// IsEscapedQuote reports whether r is a quote that honors escape
// characters (double quotes only, as in POSIX shells).
func (t *DefaultTokenizer) IsEscapedQuote(r rune) bool {
	return r == '"'
}
50
// Lexer represents a lexical analyzer.
type Lexer struct {
	reader          *bufio.Reader // buffered source of runes
	tokenizer       Tokenizer     // rune classifier; DefaultTokenizer unless SetTokenizer is called
	posix           bool          // POSIX-style quoting/escaping when true
	whitespacesplit bool          // split only on whitespace when true
}
58
59// NewLexer creates a new Lexer reading from io.Reader. This Lexer
60// has a DefaultTokenizer according to posix and whitespacesplit
61// rules.
62func NewLexer(r io.Reader, posix, whitespacesplit bool) *Lexer {
63 return &Lexer{
64 reader: bufio.NewReader(r),
65 tokenizer: &DefaultTokenizer{},
66 posix: posix,
67 whitespacesplit: whitespacesplit,
68 }
69}
70
71// NewLexerString creates a new Lexer reading from a string. This
72// Lexer has a DefaultTokenizer according to posix and whitespacesplit
73// rules.
74func NewLexerString(s string, posix, whitespacesplit bool) *Lexer {
75 return NewLexer(strings.NewReader(s), posix, whitespacesplit)
76}
77
78// Split splits a string according to posix or non-posix rules.
79func Split(s string, posix bool) ([]string, error) {
80 return NewLexerString(s, posix, true).Split()
81}
82
83// SetTokenizer sets a Tokenizer.
84func (l *Lexer) SetTokenizer(t Tokenizer) {
85 l.tokenizer = t
86}
87
88func (l *Lexer) Split() ([]string, error) {
89 result := make([]string, 0)
90 for {
91 token, err := l.readToken()
92 if token != "" {
93 result = append(result, token)
94 }
95
96 if err == io.EOF {
97 break
98 } else if err != nil {
99 return result, err
100 }
101 }
102 return result, nil
103}
104
// readToken scans the next token from the reader, driving a small
// state machine keyed on the class of the previous rune:
//
//	' '          – whitespace: between tokens
//	a quote rune – inside a quoted section (state holds the opening quote)
//	'\\'         – immediately after an escape character
//	'a'          – inside an unquoted word
//
// It returns io.EOF when the input is exhausted, ErrNoClosing when EOF
// is reached inside a quoted section, and ErrNoEscaped when EOF is
// reached right after an escape character. The structure mirrors the
// Python shlex module's read_token state machine.
func (l *Lexer) readToken() (string, error) {
	t := l.tokenizer
	token := ""
	quoted := false
	state := ' '        // current scanner state (see table above)
	escapedstate := ' ' // state to restore once an escape sequence ends
scanning:
	for {
		next, _, err := l.reader.ReadRune()
		if err != nil {
			// Input ended (or a read error occurred). Ending inside a
			// quote or escape is a malformed token.
			if t.IsQuote(state) {
				return token, ErrNoClosing
			} else if t.IsEscape(state) {
				return token, ErrNoEscaped
			}
			return token, err
		}

		switch {
		case t.IsWhitespace(state):
			// Between tokens: classify what the next rune starts.
			switch {
			case t.IsWhitespace(next):
				break scanning
			case l.posix && t.IsEscape(next):
				// POSIX: escape outside quotes continues into a word.
				escapedstate = 'a'
				state = next
			case t.IsWord(next):
				token += string(next)
				state = 'a'
			case t.IsQuote(next):
				// Non-POSIX mode keeps the quote characters in the token.
				if !l.posix {
					token += string(next)
				}
				state = next
			default:
				// Punctuation: assignment (not append) matches Python
				// shlex, which starts a fresh token here.
				token = string(next)
				if l.whitespacesplit {
					state = 'a'
				} else if token != "" || (l.posix && quoted) {
					break scanning
				}
			}
		case t.IsQuote(state):
			// Inside a quoted section; state holds the opening quote.
			quoted = true
			switch {
			case next == state:
				// Closing quote: POSIX drops it and keeps scanning the
				// word; non-POSIX keeps it and ends the token.
				if !l.posix {
					token += string(next)
					break scanning
				} else {
					state = 'a'
				}
			case l.posix && t.IsEscape(next) && t.IsEscapedQuote(state):
				// Escapes are honored only inside escaped-quote quotes
				// (double quotes with the DefaultTokenizer).
				escapedstate = state
				state = next
			default:
				token += string(next)
			}
		case t.IsEscape(state):
			// Rune immediately following an escape character.
			if t.IsQuote(escapedstate) && next != state && next != escapedstate {
				// Inside quotes a backslash only escapes the quote or
				// another backslash; otherwise it is kept literally.
				token += string(state)
			}
			token += string(next)
			state = escapedstate
		case t.IsWord(state):
			// Inside an unquoted word.
			switch {
			case t.IsWhitespace(next):
				if token != "" || (l.posix && quoted) {
					break scanning
				}
			case l.posix && t.IsQuote(next):
				state = next
			case l.posix && t.IsEscape(next):
				escapedstate = 'a'
				state = next
			case t.IsWord(next) || t.IsQuote(next):
				token += string(next)
			default:
				if l.whitespacesplit {
					token += string(next)
				} else if token != "" {
					// Punctuation ends the word; push the rune back so
					// the next call sees it. NOTE(review): UnreadRune's
					// error is ignored — it cannot fail right after a
					// successful ReadRune, but worth confirming.
					l.reader.UnreadRune()
					break scanning
				}
			}
		}
	}
	return token, nil
}