path: root/tokenizer.h
author     ridiculousfish <corydoras@ridiculousfish.com>   2012-11-18 16:30:30 -0800
committer  ridiculousfish <corydoras@ridiculousfish.com>   2012-11-18 16:30:30 -0800
commit     9992b8eb0e3366ff8a3948aa0b66a19c3c12c737 (patch)
tree       6dda0fef85812016fbba9ea067c9d586092b506d /tokenizer.h
parent     bab69f26724028d16054a3daf5c78aad7c67bb2d (diff)
Apply new indentation, brace, and whitespace style
Diffstat (limited to 'tokenizer.h')
-rw-r--r--    tokenizer.h    108
1 file changed, 54 insertions, 54 deletions
diff --git a/tokenizer.h b/tokenizer.h
index e6661d22..daa4a0b1 100644
--- a/tokenizer.h
+++ b/tokenizer.h
@@ -16,19 +16,19 @@
*/
enum token_type
{
- TOK_NONE, /**< Tokenizer not yet constructed */
- TOK_ERROR, /**< Error reading token */
- TOK_INVALID,/**< Invalid token */
- TOK_STRING,/**< String token */
- TOK_PIPE,/**< Pipe token */
- TOK_END,/**< End token */
- TOK_REDIRECT_OUT, /**< redirection token */
- TOK_REDIRECT_APPEND,/**< redirection append token */
- TOK_REDIRECT_IN,/**< input redirection token */
- TOK_REDIRECT_FD,/**< redirection to new fd token */
- TOK_REDIRECT_NOCLOB, /**<? redirection token */
- TOK_BACKGROUND,/**< send job to bg token */
- TOK_COMMENT/**< comment token */
+ TOK_NONE, /**< Tokenizer not yet constructed */
+ TOK_ERROR, /**< Error reading token */
+ TOK_INVALID,/**< Invalid token */
+ TOK_STRING,/**< String token */
+ TOK_PIPE,/**< Pipe token */
+ TOK_END,/**< End token */
+ TOK_REDIRECT_OUT, /**< redirection token */
+ TOK_REDIRECT_APPEND,/**< redirection append token */
+ TOK_REDIRECT_IN,/**< input redirection token */
+ TOK_REDIRECT_FD,/**< redirection to new fd token */
+ TOK_REDIRECT_NOCLOB, /**<? redirection token */
+ TOK_BACKGROUND,/**< send job to bg token */
+ TOK_COMMENT/**< comment token */
};
/**
@@ -36,12 +36,12 @@ enum token_type
*/
enum tokenizer_error
{
- TOK_UNTERMINATED_QUOTE,
- TOK_UNTERMINATED_SUBSHELL,
- TOK_UNTERMINATED_ESCAPE,
- TOK_OTHER
+ TOK_UNTERMINATED_QUOTE,
+ TOK_UNTERMINATED_SUBSHELL,
+ TOK_UNTERMINATED_ESCAPE,
+ TOK_OTHER
}
- ;
+;
/**
@@ -67,29 +67,29 @@ enum tokenizer_error
*/
struct tokenizer
{
- /** A pointer into the original string, showing where the next token begins */
- const wchar_t *buff;
- /** A copy of the original string */
- const wchar_t *orig_buff;
- /** A pointer to the last token*/
- wchar_t *last;
-
- /** Type of last token*/
- int last_type;
- /** Length of last token*/
- size_t last_len;
- /** Offset of last token*/
- size_t last_pos;
- /** Whether there are more tokens*/
- bool has_next;
- /** Whether incomplete tokens are accepted*/
- bool accept_unfinished;
- /** Whether commants should be returned*/
- bool show_comments;
- /** Type of last quote, can be either ' or ".*/
- wchar_t last_quote;
- /** Last error */
- int error;
+ /** A pointer into the original string, showing where the next token begins */
+ const wchar_t *buff;
+ /** A copy of the original string */
+ const wchar_t *orig_buff;
+ /** A pointer to the last token*/
+ wchar_t *last;
+
+ /** Type of last token*/
+ int last_type;
+ /** Length of last token*/
+ size_t last_len;
+ /** Offset of last token*/
+ size_t last_pos;
+ /** Whether there are more tokens*/
+ bool has_next;
+ /** Whether incomplete tokens are accepted*/
+ bool accept_unfinished;
+ /** Whether commants should be returned*/
+ bool show_comments;
+ /** Type of last quote, can be either ' or ".*/
+ wchar_t last_quote;
+ /** Last error */
+ int error;
/* Whether we are squashing errors */
bool squash_errors;
@@ -114,48 +114,48 @@ struct tokenizer
parenthesis, as a valid token. Setting TOK_SHOW_COMMENTS will return comments as tokens
*/
-void tok_init( tokenizer *tok, const wchar_t *b, int flags );
+void tok_init(tokenizer *tok, const wchar_t *b, int flags);
/**
Jump to the next token.
*/
-void tok_next( tokenizer *tok );
+void tok_next(tokenizer *tok);
/**
Returns the type of the last token. Must be one of the values in the token_type enum.
*/
-int tok_last_type( tokenizer *tok );
+int tok_last_type(tokenizer *tok);
/**
Returns the last token string. The string should not be freed by the caller.
*/
-wchar_t *tok_last( tokenizer *tok );
+wchar_t *tok_last(tokenizer *tok);
/**
Returns the type of quote from the last TOK_QSTRING
*/
-wchar_t tok_last_quote( tokenizer *tok );
+wchar_t tok_last_quote(tokenizer *tok);
/**
Returns true as long as there are more tokens left
*/
-int tok_has_next( tokenizer *tok );
+int tok_has_next(tokenizer *tok);
/**
Returns the position of the beginning of the current token in the original string
*/
-int tok_get_pos( tokenizer *tok );
+int tok_get_pos(tokenizer *tok);
/**
Destroy the tokenizer and free asociated memory
*/
-void tok_destroy( tokenizer *tok );
+void tok_destroy(tokenizer *tok);
/**
Returns the original string to tokenizer
*/
-const wchar_t *tok_string( tokenizer *tok );
+const wchar_t *tok_string(tokenizer *tok);
/**
@@ -165,22 +165,22 @@ const wchar_t *tok_string( tokenizer *tok );
The string should be freed. After use.
*/
-wchar_t *tok_first( const wchar_t *str );
+wchar_t *tok_first(const wchar_t *str);
/**
Move tokenizer position
*/
-void tok_set_pos( tokenizer *tok, int pos );
+void tok_set_pos(tokenizer *tok, int pos);
/**
Returns a string description of the specified token type
*/
-const wchar_t *tok_get_desc( int type );
+const wchar_t *tok_get_desc(int type);
/**
Get tokenizer error type. Should only be called if tok_last_tope returns TOK_ERROR.
*/
-int tok_get_error( tokenizer *tok );
+int tok_get_error(tokenizer *tok);
#endif
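
For context, the sketch below shows one way the API declared in this header could be driven. It is not part of the commit; it assumes the `tokenizer` typedef and any flag constants (such as TOK_SHOW_COMMENTS, mentioned in the tok_init comment) come from the portions of tokenizer.h not visible in these hunks, and it uses only the functions declared above, following the brace and indentation style this commit applies.

/* Hypothetical usage sketch, not taken from the fish source tree. */
#include <stdio.h>
#include <wchar.h>
#include "tokenizer.h"

static void dump_tokens(const wchar_t *cmd)
{
    tokenizer tok;

    /* tok_init reads the first token; 0 requests default behaviour
       (no comments, no unfinished tokens). */
    tok_init(&tok, cmd, 0);

    while (tok_has_next(&tok))
    {
        int type = tok_last_type(&tok);
        if (type == TOK_ERROR)
        {
            fwprintf(stderr, L"tokenizer error %d at offset %d\n",
                     tok_get_error(&tok), tok_get_pos(&tok));
            break;
        }
        fwprintf(stdout, L"%ls at offset %d: '%ls'\n",
                 tok_get_desc(type), tok_get_pos(&tok), tok_last(&tok));
        tok_next(&tok);
    }

    tok_destroy(&tok);
}

int main(void)
{
    dump_tokens(L"echo hello | grep h > out.txt &");
    return 0;
}

On the example command line this should report TOK_STRING tokens for the words, plus TOK_PIPE, TOK_REDIRECT_OUT, and TOK_BACKGROUND, matching the token_type values in the first hunk.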