Make enums into enum classes

This makes all enums into enum classes except for:

* The internal enums in src/stream.cpp:
    UtfIntroState
    UtfIntroCharType
* EMITTER_MANIP (covered separately in #989)
* Pseudo enums (covered separately in #990)

Signed-off-by: Ted Lyngmo <ted@lyncon.se>
This commit is contained in:
Ted Lyngmo 2021-04-23 15:07:53 +02:00
parent a6bbe0e50a
commit dfe5f22310
23 changed files with 219 additions and 217 deletions

View File

@ -327,7 +327,7 @@ inline const Node Node::operator[](const Key& key) const {
detail::node* value =
static_cast<const detail::node&>(*m_pNode).get(key, m_pMemory);
if (!value) {
return Node(ZombieNode, key_to_string(key));
return Node(Zombie::ZombieNode, key_to_string(key));
}
return Node(*value, m_pMemory);
}
@ -352,7 +352,7 @@ inline const Node Node::operator[](const Node& key) const {
detail::node* value =
static_cast<const detail::node&>(*m_pNode).get(*key.m_pNode, m_pMemory);
if (!value) {
return Node(ZombieNode, key_to_string(key));
return Node(Zombie::ZombieNode, key_to_string(key));
}
return Node(*value, m_pMemory);
}

View File

@ -21,9 +21,9 @@ struct iterator_value : public Node, std::pair<Node, Node> {
iterator_value() = default;
explicit iterator_value(const Node& rhs)
: Node(rhs),
std::pair<Node, Node>(Node(Node::ZombieNode), Node(Node::ZombieNode)) {}
std::pair<Node, Node>(Node(Node::Zombie::ZombieNode), Node(Node::Zombie::ZombieNode)) {}
explicit iterator_value(const Node& key, const Node& value)
: Node(Node::ZombieNode), std::pair<Node, Node>(key, value) {}
: Node(Node::Zombie::ZombieNode), std::pair<Node, Node>(key, value) {}
};
}
}

View File

@ -114,7 +114,7 @@ class YAML_CPP_API Node {
void force_insert(const Key& key, const Value& value);
private:
enum Zombie { ZombieNode };
enum class Zombie { ZombieNode };
explicit Node(Zombie);
explicit Node(Zombie, const std::string&);
explicit Node(detail::node& node, detail::shared_memory_holder pMemory);

View File

@ -15,71 +15,71 @@
namespace YAML {
template <typename>
struct is_numeric {
enum { value = false };
static constexpr bool value = false;
};
template <>
struct is_numeric<char> {
enum { value = true };
static constexpr bool value = true;
};
template <>
struct is_numeric<unsigned char> {
enum { value = true };
static constexpr bool value = true;
};
template <>
struct is_numeric<int> {
enum { value = true };
static constexpr bool value = true;
};
template <>
struct is_numeric<unsigned int> {
enum { value = true };
static constexpr bool value = true;
};
template <>
struct is_numeric<long int> {
enum { value = true };
static constexpr bool value = true;
};
template <>
struct is_numeric<unsigned long int> {
enum { value = true };
static constexpr bool value = true;
};
template <>
struct is_numeric<short int> {
enum { value = true };
static constexpr bool value = true;
};
template <>
struct is_numeric<unsigned short int> {
enum { value = true };
static constexpr bool value = true;
};
#if defined(_MSC_VER) && (_MSC_VER < 1310)
template <>
struct is_numeric<__int64> {
enum { value = true };
static constexpr bool value = true;
};
template <>
struct is_numeric<unsigned __int64> {
enum { value = true };
static constexpr bool value = true;
};
#else
template <>
struct is_numeric<long long> {
enum { value = true };
static constexpr bool value = true;
};
template <>
struct is_numeric<unsigned long long> {
enum { value = true };
static constexpr bool value = true;
};
#endif
template <>
struct is_numeric<float> {
enum { value = true };
static constexpr bool value = true;
};
template <>
struct is_numeric<double> {
enum { value = true };
static constexpr bool value = true;
};
template <>
struct is_numeric<long double> {
enum { value = true };
static constexpr bool value = true;
};
template <bool, class T = void>

View File

@ -15,7 +15,7 @@
namespace YAML {
namespace Utils {
namespace {
enum { REPLACEMENT_CHARACTER = 0xFFFD };
constexpr int REPLACEMENT_CHARACTER = 0xFFFD;
bool IsAnchorChar(int ch) { // test for ns-anchor-char
switch (ch) {

View File

@ -69,7 +69,7 @@ inline const RegEx& Hex() {
inline const RegEx& NotPrintable() {
static const RegEx e =
RegEx(0) |
RegEx("\x01\x02\x03\x04\x05\x06\x07\x08\x0B\x0C\x7F", REGEX_OR) |
RegEx("\x01\x02\x03\x04\x05\x06\x07\x08\x0B\x0C\x7F", REGEX_OP::REGEX_OR) |
RegEx(0x0E, 0x1F) |
(RegEx('\xC2') + (RegEx('\x80', '\x84') | RegEx('\x86', '\x9F')));
return e;
@ -110,7 +110,7 @@ inline const RegEx& Value() {
return e;
}
inline const RegEx& ValueInFlow() {
static const RegEx e = RegEx(':') + (BlankOrBreak() | RegEx(",]}", REGEX_OR));
static const RegEx e = RegEx(':') + (BlankOrBreak() | RegEx(",]}", REGEX_OP::REGEX_OR));
return e;
}
inline const RegEx& ValueInJSONFlow() {
@ -122,20 +122,20 @@ inline const RegEx Comment() {
return e;
}
inline const RegEx& Anchor() {
static const RegEx e = !(RegEx("[]{},", REGEX_OR) | BlankOrBreak());
static const RegEx e = !(RegEx("[]{},", REGEX_OP::REGEX_OR) | BlankOrBreak());
return e;
}
inline const RegEx& AnchorEnd() {
static const RegEx e = RegEx("?:,]}%@`", REGEX_OR) | BlankOrBreak();
static const RegEx e = RegEx("?:,]}%@`", REGEX_OP::REGEX_OR) | BlankOrBreak();
return e;
}
inline const RegEx& URI() {
static const RegEx e = Word() | RegEx("#;/?:@&=+$,_.!~*'()[]", REGEX_OR) |
static const RegEx e = Word() | RegEx("#;/?:@&=+$,_.!~*'()[]", REGEX_OP::REGEX_OR) |
(RegEx('%') + Hex() + Hex());
return e;
}
inline const RegEx& Tag() {
static const RegEx e = Word() | RegEx("#;/?:@&=+$_.~*'()", REGEX_OR) |
static const RegEx e = Word() | RegEx("#;/?:@&=+$_.~*'()", REGEX_OP::REGEX_OR) |
(RegEx('%') + Hex() + Hex());
return e;
}
@ -148,14 +148,14 @@ inline const RegEx& Tag() {
// space.
inline const RegEx& PlainScalar() {
static const RegEx e =
!(BlankOrBreak() | RegEx(",[]{}#&*!|>\'\"%@`", REGEX_OR) |
(RegEx("-?:", REGEX_OR) + (BlankOrBreak() | RegEx())));
!(BlankOrBreak() | RegEx(",[]{}#&*!|>\'\"%@`", REGEX_OP::REGEX_OR) |
(RegEx("-?:", REGEX_OP::REGEX_OR) + (BlankOrBreak() | RegEx())));
return e;
}
inline const RegEx& PlainScalarInFlow() {
static const RegEx e =
!(BlankOrBreak() | RegEx("?,[]{}#&*!|>\'\"%@`", REGEX_OR) |
(RegEx("-:", REGEX_OR) + (Blank() | RegEx())));
!(BlankOrBreak() | RegEx("?,[]{}#&*!|>\'\"%@`", REGEX_OP::REGEX_OR) |
(RegEx("-:", REGEX_OP::REGEX_OR) + (Blank() | RegEx())));
return e;
}
inline const RegEx& EndScalar() {
@ -164,8 +164,8 @@ inline const RegEx& EndScalar() {
}
inline const RegEx& EndScalarInFlow() {
static const RegEx e =
(RegEx(':') + (BlankOrBreak() | RegEx() | RegEx(",]}", REGEX_OR))) |
RegEx(",?[]{}", REGEX_OR);
(RegEx(':') + (BlankOrBreak() | RegEx() | RegEx(",]}", REGEX_OP::REGEX_OR))) |
RegEx(",?[]{}", REGEX_OP::REGEX_OR);
return e;
}
@ -188,7 +188,7 @@ inline const RegEx& EscBreak() {
}
inline const RegEx& ChompIndicator() {
static const RegEx e = RegEx("+-", REGEX_OR);
static const RegEx e = RegEx("+-", REGEX_OP::REGEX_OR);
return e;
}
inline const RegEx& Chomp() {

View File

@ -43,7 +43,7 @@ void Parser::ParseDirectives() {
while (!m_pScanner->empty()) {
Token& token = m_pScanner->peek();
if (token.type != Token::DIRECTIVE) {
if (token.type != Token::TYPE::DIRECTIVE) {
break;
}

View File

@ -4,38 +4,38 @@ namespace YAML {
// constructors
RegEx::RegEx(REGEX_OP op) : m_op(op), m_a(0), m_z(0), m_params{} {}
RegEx::RegEx() : RegEx(REGEX_EMPTY) {}
RegEx::RegEx() : RegEx(REGEX_OP::REGEX_EMPTY) {}
RegEx::RegEx(char ch) : m_op(REGEX_MATCH), m_a(ch), m_z(0), m_params{} {}
RegEx::RegEx(char ch) : m_op(REGEX_OP::REGEX_MATCH), m_a(ch), m_z(0), m_params{} {}
RegEx::RegEx(char a, char z) : m_op(REGEX_RANGE), m_a(a), m_z(z), m_params{} {}
RegEx::RegEx(char a, char z) : m_op(REGEX_OP::REGEX_RANGE), m_a(a), m_z(z), m_params{} {}
RegEx::RegEx(const std::string& str, REGEX_OP op)
: m_op(op), m_a(0), m_z(0), m_params(str.begin(), str.end()) {}
// combination constructors
RegEx operator!(const RegEx& ex) {
RegEx ret(REGEX_NOT);
RegEx ret(REGEX_OP::REGEX_NOT);
ret.m_params.push_back(ex);
return ret;
}
RegEx operator|(const RegEx& ex1, const RegEx& ex2) {
RegEx ret(REGEX_OR);
RegEx ret(REGEX_OP::REGEX_OR);
ret.m_params.push_back(ex1);
ret.m_params.push_back(ex2);
return ret;
}
RegEx operator&(const RegEx& ex1, const RegEx& ex2) {
RegEx ret(REGEX_AND);
RegEx ret(REGEX_OP::REGEX_AND);
ret.m_params.push_back(ex1);
ret.m_params.push_back(ex2);
return ret;
}
RegEx operator+(const RegEx& ex1, const RegEx& ex2) {
RegEx ret(REGEX_SEQ);
RegEx ret(REGEX_OP::REGEX_SEQ);
ret.m_params.push_back(ex1);
ret.m_params.push_back(ex2);
return ret;

View File

@ -15,7 +15,7 @@
namespace YAML {
class Stream;
enum REGEX_OP {
enum class REGEX_OP {
REGEX_EMPTY,
REGEX_MATCH,
REGEX_RANGE,
@ -33,7 +33,7 @@ class YAML_CPP_API RegEx {
RegEx();
explicit RegEx(char ch);
RegEx(char a, char z);
RegEx(const std::string& str, REGEX_OP op = REGEX_SEQ);
RegEx(const std::string& str, REGEX_OP op = REGEX_OP::REGEX_SEQ);
~RegEx() = default;
friend YAML_CPP_API RegEx operator!(const RegEx& ex);

View File

@ -56,8 +56,8 @@ template <>
inline bool RegEx::IsValidSource<StringCharSource>(
const StringCharSource& source) const {
switch (m_op) {
case REGEX_MATCH:
case REGEX_RANGE:
case REGEX_OP::REGEX_MATCH:
case REGEX_OP::REGEX_RANGE:
return source;
default:
return true;
@ -72,19 +72,19 @@ inline int RegEx::Match(const Source& source) const {
template <typename Source>
inline int RegEx::MatchUnchecked(const Source& source) const {
switch (m_op) {
case REGEX_EMPTY:
case REGEX_OP::REGEX_EMPTY:
return MatchOpEmpty(source);
case REGEX_MATCH:
case REGEX_OP::REGEX_MATCH:
return MatchOpMatch(source);
case REGEX_RANGE:
case REGEX_OP::REGEX_RANGE:
return MatchOpRange(source);
case REGEX_OR:
case REGEX_OP::REGEX_OR:
return MatchOpOr(source);
case REGEX_AND:
case REGEX_OP::REGEX_AND:
return MatchOpAnd(source);
case REGEX_NOT:
case REGEX_OP::REGEX_NOT:
return MatchOpNot(source);
case REGEX_SEQ:
case REGEX_OP::REGEX_SEQ:
return MatchOpSeq(source);
}

View File

@ -56,12 +56,12 @@ void Scanner::EnsureTokensInQueue() {
Token& token = m_tokens.front();
// if this guy's valid, then we're done
if (token.status == Token::VALID) {
if (token.status == Token::STATUS::VALID) {
return;
}
// here's where we clean up the impossible tokens
if (token.status == Token::INVALID) {
if (token.status == Token::STATUS::INVALID) {
m_tokens.pop();
continue;
}
@ -246,7 +246,7 @@ void Scanner::StartStream() {
m_startedStream = true;
m_simpleKeyAllowed = true;
std::unique_ptr<IndentMarker> pIndent(
new IndentMarker(-1, IndentMarker::NONE));
new IndentMarker(-1, IndentMarker::INDENT_TYPE::NONE));
m_indentRefs.push_back(std::move(pIndent));
m_indents.push(&m_indentRefs.back());
}
@ -271,11 +271,11 @@ Token* Scanner::PushToken(Token::TYPE type) {
Token::TYPE Scanner::GetStartTokenFor(IndentMarker::INDENT_TYPE type) const {
switch (type) {
case IndentMarker::SEQ:
return Token::BLOCK_SEQ_START;
case IndentMarker::MAP:
return Token::BLOCK_MAP_START;
case IndentMarker::NONE:
case IndentMarker::INDENT_TYPE::SEQ:
return Token::TYPE::BLOCK_SEQ_START;
case IndentMarker::INDENT_TYPE::MAP:
return Token::TYPE::BLOCK_MAP_START;
case IndentMarker::INDENT_TYPE::NONE:
assert(false);
break;
}
@ -299,8 +299,8 @@ Scanner::IndentMarker* Scanner::PushIndentTo(int column,
return nullptr;
}
if (indent.column == lastIndent.column &&
!(indent.type == IndentMarker::SEQ &&
lastIndent.type == IndentMarker::MAP)) {
!(indent.type == IndentMarker::INDENT_TYPE::SEQ &&
lastIndent.type == IndentMarker::INDENT_TYPE::MAP)) {
return nullptr;
}
@ -326,7 +326,7 @@ void Scanner::PopIndentToHere() {
break;
}
if (indent.column == INPUT.column() &&
!(indent.type == IndentMarker::SEQ &&
!(indent.type == IndentMarker::INDENT_TYPE::SEQ &&
!Exp::BlockEntry().Matches(INPUT))) {
break;
}
@ -335,7 +335,7 @@ void Scanner::PopIndentToHere() {
}
while (!m_indents.empty() &&
m_indents.top()->status == IndentMarker::INVALID) {
m_indents.top()->status == IndentMarker::STATUS::INVALID) {
PopIndent();
}
}
@ -349,7 +349,7 @@ void Scanner::PopAllIndents() {
// now pop away
while (!m_indents.empty()) {
const IndentMarker& indent = *m_indents.top();
if (indent.type == IndentMarker::NONE) {
if (indent.type == IndentMarker::INDENT_TYPE::NONE) {
break;
}
@ -361,15 +361,15 @@ void Scanner::PopIndent() {
const IndentMarker& indent = *m_indents.top();
m_indents.pop();
if (indent.status != IndentMarker::VALID) {
if (indent.status != IndentMarker::STATUS::VALID) {
InvalidateSimpleKey();
return;
}
if (indent.type == IndentMarker::SEQ) {
m_tokens.push(Token(Token::BLOCK_SEQ_END, INPUT.mark()));
} else if (indent.type == IndentMarker::MAP) {
m_tokens.push(Token(Token::BLOCK_MAP_END, INPUT.mark()));
if (indent.type == IndentMarker::INDENT_TYPE::SEQ) {
m_tokens.push(Token(Token::TYPE::BLOCK_SEQ_END, INPUT.mark()));
} else if (indent.type == IndentMarker::INDENT_TYPE::MAP) {
m_tokens.push(Token(Token::TYPE::BLOCK_MAP_END, INPUT.mark()));
}
}

View File

@ -46,10 +46,10 @@ class Scanner {
private:
struct IndentMarker {
enum INDENT_TYPE { MAP, SEQ, NONE };
enum STATUS { VALID, INVALID, UNKNOWN };
enum class INDENT_TYPE { MAP, SEQ, NONE };
enum class STATUS { VALID, INVALID, UNKNOWN };
IndentMarker(int column_, INDENT_TYPE type_)
: column(column_), type(type_), status(VALID), pStartToken(nullptr) {}
: column(column_), type(type_), status(STATUS::VALID), pStartToken(nullptr) {}
int column;
INDENT_TYPE type;
@ -57,7 +57,7 @@ class Scanner {
Token *pStartToken;
};
enum FLOW_MARKER { FLOW_MAP, FLOW_SEQ };
enum class FLOW_MARKER { FLOW_MAP, FLOW_SEQ };
private:
// scanning

View File

@ -20,7 +20,7 @@ namespace YAML {
// and different places in the above flow.
std::string ScanScalar(Stream& INPUT, ScanScalarParams& params) {
bool foundNonEmptyLine = false;
bool pastOpeningBreak = (params.fold == FOLD_FLOW);
bool pastOpeningBreak = (params.fold == FOLD::FOLD_FLOW);
bool emptyLine = false, moreIndented = false;
int foldedNewlineCount = 0;
bool foldedNewlineStartedMoreIndented = false;
@ -45,10 +45,10 @@ std::string ScanScalar(Stream& INPUT, ScanScalarParams& params) {
// document indicator?
if (INPUT.column() == 0 && Exp::DocIndicator().Matches(INPUT)) {
if (params.onDocIndicator == BREAK) {
if (params.onDocIndicator == ACTION::BREAK) {
break;
}
if (params.onDocIndicator == THROW) {
if (params.onDocIndicator == ACTION::THROW) {
throw ParserException(INPUT.mark(), ErrorMsg::DOC_IN_SCALAR);
}
}
@ -91,7 +91,7 @@ std::string ScanScalar(Stream& INPUT, ScanScalarParams& params) {
}
// doc indicator?
if (params.onDocIndicator == BREAK && INPUT.column() == 0 &&
if (params.onDocIndicator == ACTION::BREAK && INPUT.column() == 0 &&
Exp::DocIndicator().Matches(INPUT)) {
break;
}
@ -106,7 +106,7 @@ std::string ScanScalar(Stream& INPUT, ScanScalarParams& params) {
}
// do we remove trailing whitespace?
if (params.fold == FOLD_FLOW)
if (params.fold == FOLD::FOLD_FLOW)
scalar.erase(lastNonWhitespaceChar);
// ********************************
@ -134,7 +134,7 @@ std::string ScanScalar(Stream& INPUT, ScanScalarParams& params) {
while (Exp::Blank().Matches(INPUT)) {
// we check for tabs that masquerade as indentation
if (INPUT.peek() == '\t' && INPUT.column() < params.indent &&
params.onTabInIndentation == THROW) {
params.onTabInIndentation == ACTION::THROW) {
throw ParserException(INPUT.mark(), ErrorMsg::TAB_IN_INDENTATION);
}
@ -152,17 +152,17 @@ std::string ScanScalar(Stream& INPUT, ScanScalarParams& params) {
// was this an empty line?
bool nextEmptyLine = Exp::Break().Matches(INPUT);
bool nextMoreIndented = Exp::Blank().Matches(INPUT);
if (params.fold == FOLD_BLOCK && foldedNewlineCount == 0 && nextEmptyLine)
if (params.fold == FOLD::FOLD_BLOCK && foldedNewlineCount == 0 && nextEmptyLine)
foldedNewlineStartedMoreIndented = moreIndented;
// for block scalars, we always start with a newline, so we should ignore it
// (not fold or keep)
if (pastOpeningBreak) {
switch (params.fold) {
case DONT_FOLD:
case FOLD::DONT_FOLD:
scalar += "\n";
break;
case FOLD_BLOCK:
case FOLD::FOLD_BLOCK:
if (!emptyLine && !nextEmptyLine && !moreIndented &&
!nextMoreIndented && INPUT.column() >= params.indent) {
scalar += " ";
@ -181,7 +181,7 @@ std::string ScanScalar(Stream& INPUT, ScanScalarParams& params) {
foldedNewlineCount = 0;
}
break;
case FOLD_FLOW:
case FOLD::FOLD_FLOW:
if (nextEmptyLine) {
scalar += "\n";
} else if (!emptyLine && !escapedNewline) {
@ -216,7 +216,7 @@ std::string ScanScalar(Stream& INPUT, ScanScalarParams& params) {
}
switch (params.chomp) {
case CLIP: {
case CHOMP::CLIP: {
std::size_t pos = scalar.find_last_not_of('\n');
if (lastEscapedChar != std::string::npos) {
if (pos < lastEscapedChar || pos == std::string::npos) {
@ -229,7 +229,7 @@ std::string ScanScalar(Stream& INPUT, ScanScalarParams& params) {
scalar.erase(pos + 2);
}
} break;
case STRIP: {
case CHOMP::STRIP: {
std::size_t pos = scalar.find_last_not_of('\n');
if (lastEscapedChar != std::string::npos) {
if (pos < lastEscapedChar || pos == std::string::npos) {

View File

@ -13,9 +13,9 @@
#include "stream.h"
namespace YAML {
enum CHOMP { STRIP = -1, CLIP, KEEP };
enum ACTION { NONE, BREAK, THROW };
enum FOLD { DONT_FOLD, FOLD_BLOCK, FOLD_FLOW };
enum class CHOMP { STRIP = -1, CLIP, KEEP };
enum class ACTION { NONE, BREAK, THROW };
enum class FOLD { DONT_FOLD, FOLD_BLOCK, FOLD_FLOW };
struct ScanScalarParams {
ScanScalarParams()
@ -25,11 +25,11 @@ struct ScanScalarParams {
detectIndent(false),
eatLeadingWhitespace(0),
escape(0),
fold(DONT_FOLD),
fold(FOLD::DONT_FOLD),
trimTrailingSpaces(0),
chomp(CLIP),
onDocIndicator(NONE),
onTabInIndentation(NONE),
chomp(CHOMP::CLIP),
onDocIndicator(ACTION::NONE),
onTabInIndentation(ACTION::NONE),
leadingSpaces(false) {}
// input:

View File

@ -29,7 +29,7 @@ void Scanner::ScanDirective() {
m_canBeJSONFlow = false;
// store pos and eat indicator
Token token(Token::DIRECTIVE, INPUT.mark());
Token token(Token::TYPE::DIRECTIVE, INPUT.mark());
INPUT.eat(1);
// read name
@ -67,7 +67,7 @@ void Scanner::ScanDocStart() {
// eat
Mark mark = INPUT.mark();
INPUT.eat(3);
m_tokens.push(Token(Token::DOC_START, mark));
m_tokens.push(Token(Token::TYPE::DOC_START, mark));
}
// DocEnd
@ -80,7 +80,7 @@ void Scanner::ScanDocEnd() {
// eat
Mark mark = INPUT.mark();
INPUT.eat(3);
m_tokens.push(Token(Token::DOC_END, mark));
m_tokens.push(Token(Token::TYPE::DOC_END, mark));
}
// FlowStart
@ -93,10 +93,10 @@ void Scanner::ScanFlowStart() {
// eat
Mark mark = INPUT.mark();
char ch = INPUT.get();
FLOW_MARKER flowType = (ch == Keys::FlowSeqStart ? FLOW_SEQ : FLOW_MAP);
FLOW_MARKER flowType = (ch == Keys::FlowSeqStart ? FLOW_MARKER::FLOW_SEQ : FLOW_MARKER::FLOW_MAP);
m_flows.push(flowType);
Token::TYPE type =
(flowType == FLOW_SEQ ? Token::FLOW_SEQ_START : Token::FLOW_MAP_START);
(flowType == FLOW_MARKER::FLOW_SEQ ? Token::TYPE::FLOW_SEQ_START : Token::TYPE::FLOW_MAP_START);
m_tokens.push(Token(type, mark));
}
@ -107,9 +107,9 @@ void Scanner::ScanFlowEnd() {
// we might have a solo entry in the flow context
if (InFlowContext()) {
if (m_flows.top() == FLOW_MAP && VerifySimpleKey())
m_tokens.push(Token(Token::VALUE, INPUT.mark()));
else if (m_flows.top() == FLOW_SEQ)
if (m_flows.top() == FLOW_MARKER::FLOW_MAP && VerifySimpleKey())
m_tokens.push(Token(Token::TYPE::VALUE, INPUT.mark()));
else if (m_flows.top() == FLOW_MARKER::FLOW_SEQ)
InvalidateSimpleKey();
}
@ -121,12 +121,12 @@ void Scanner::ScanFlowEnd() {
char ch = INPUT.get();
// check that it matches the start
FLOW_MARKER flowType = (ch == Keys::FlowSeqEnd ? FLOW_SEQ : FLOW_MAP);
FLOW_MARKER flowType = (ch == Keys::FlowSeqEnd ? FLOW_MARKER::FLOW_SEQ : FLOW_MARKER::FLOW_MAP);
if (m_flows.top() != flowType)
throw ParserException(mark, ErrorMsg::FLOW_END);
m_flows.pop();
Token::TYPE type = (flowType ? Token::FLOW_SEQ_END : Token::FLOW_MAP_END);
Token::TYPE type = (flowType == FLOW_MARKER::FLOW_SEQ ? Token::TYPE::FLOW_SEQ_END : Token::TYPE::FLOW_MAP_END);
m_tokens.push(Token(type, mark));
}
@ -134,9 +134,9 @@ void Scanner::ScanFlowEnd() {
void Scanner::ScanFlowEntry() {
// we might have a solo entry in the flow context
if (InFlowContext()) {
if (m_flows.top() == FLOW_MAP && VerifySimpleKey())
m_tokens.push(Token(Token::VALUE, INPUT.mark()));
else if (m_flows.top() == FLOW_SEQ)
if (m_flows.top() == FLOW_MARKER::FLOW_MAP && VerifySimpleKey())
m_tokens.push(Token(Token::TYPE::VALUE, INPUT.mark()));
else if (m_flows.top() == FLOW_MARKER::FLOW_SEQ)
InvalidateSimpleKey();
}
@ -146,7 +146,7 @@ void Scanner::ScanFlowEntry() {
// eat
Mark mark = INPUT.mark();
INPUT.eat(1);
m_tokens.push(Token(Token::FLOW_ENTRY, mark));
m_tokens.push(Token(Token::TYPE::FLOW_ENTRY, mark));
}
// BlockEntry
@ -159,14 +159,14 @@ void Scanner::ScanBlockEntry() {
if (!m_simpleKeyAllowed)
throw ParserException(INPUT.mark(), ErrorMsg::BLOCK_ENTRY);
PushIndentTo(INPUT.column(), IndentMarker::SEQ);
PushIndentTo(INPUT.column(), IndentMarker::INDENT_TYPE::SEQ);
m_simpleKeyAllowed = true;
m_canBeJSONFlow = false;
// eat
Mark mark = INPUT.mark();
INPUT.eat(1);
m_tokens.push(Token(Token::BLOCK_ENTRY, mark));
m_tokens.push(Token(Token::TYPE::BLOCK_ENTRY, mark));
}
// Key
@ -176,7 +176,7 @@ void Scanner::ScanKey() {
if (!m_simpleKeyAllowed)
throw ParserException(INPUT.mark(), ErrorMsg::MAP_KEY);
PushIndentTo(INPUT.column(), IndentMarker::MAP);
PushIndentTo(INPUT.column(), IndentMarker::INDENT_TYPE::MAP);
}
// can only put a simple key here if we're in block context
@ -185,7 +185,7 @@ void Scanner::ScanKey() {
// eat
Mark mark = INPUT.mark();
INPUT.eat(1);
m_tokens.push(Token(Token::KEY, mark));
m_tokens.push(Token(Token::TYPE::KEY, mark));
}
// Value
@ -204,7 +204,7 @@ void Scanner::ScanValue() {
if (!m_simpleKeyAllowed)
throw ParserException(INPUT.mark(), ErrorMsg::MAP_VALUE);
PushIndentTo(INPUT.column(), IndentMarker::MAP);
PushIndentTo(INPUT.column(), IndentMarker::INDENT_TYPE::MAP);
}
// can only put a simple key here if we're in block context
@ -214,7 +214,7 @@ void Scanner::ScanValue() {
// eat
Mark mark = INPUT.mark();
INPUT.eat(1);
m_tokens.push(Token(Token::VALUE, mark));
m_tokens.push(Token(Token::TYPE::VALUE, mark));
}
// AnchorOrAlias
@ -247,7 +247,7 @@ void Scanner::ScanAnchorOrAlias() {
: ErrorMsg::CHAR_IN_ANCHOR);
// and we're done
Token token(alias ? Token::ALIAS : Token::ANCHOR, mark);
Token token(alias ? Token::TYPE::ALIAS : Token::TYPE::ANCHOR, mark);
token.value = name;
m_tokens.push(token);
}
@ -259,32 +259,34 @@ void Scanner::ScanTag() {
m_simpleKeyAllowed = false;
m_canBeJSONFlow = false;
Token token(Token::TAG, INPUT.mark());
Token token(Token::TYPE::TAG, INPUT.mark());
// eat the indicator
INPUT.get();
using token_data_t = decltype(token.data);
if (INPUT && INPUT.peek() == Keys::VerbatimTagStart) {
std::string tag = ScanVerbatimTag(INPUT);
token.value = tag;
token.data = Tag::VERBATIM;
token.data = static_cast<token_data_t>(Tag::TYPE::VERBATIM);
} else {
bool canBeHandle;
token.value = ScanTagHandle(INPUT, canBeHandle);
if (!canBeHandle && token.value.empty())
token.data = Tag::NON_SPECIFIC;
token.data = static_cast<token_data_t>(Tag::TYPE::NON_SPECIFIC);
else if (token.value.empty())
token.data = Tag::SECONDARY_HANDLE;
token.data = static_cast<token_data_t>(Tag::TYPE::SECONDARY_HANDLE);
else
token.data = Tag::PRIMARY_HANDLE;
token.data = static_cast<token_data_t>(Tag::TYPE::PRIMARY_HANDLE);
// is there a suffix?
if (canBeHandle && INPUT.peek() == Keys::Tag) {
// eat the indicator
INPUT.get();
token.params.push_back(ScanTagSuffix(INPUT));
token.data = Tag::NAMED_HANDLE;
token.data = static_cast<token_data_t>(Tag::TYPE::NAMED_HANDLE);
}
}
@ -301,12 +303,12 @@ void Scanner::ScanPlainScalar() {
(InFlowContext() ? &Exp::ScanScalarEndInFlow() : &Exp::ScanScalarEnd());
params.eatEnd = false;
params.indent = (InFlowContext() ? 0 : GetTopIndent() + 1);
params.fold = FOLD_FLOW;
params.fold = FOLD::FOLD_FLOW;
params.eatLeadingWhitespace = true;
params.trimTrailingSpaces = true;
params.chomp = STRIP;
params.onDocIndicator = BREAK;
params.onTabInIndentation = THROW;
params.chomp = CHOMP::STRIP;
params.onDocIndicator = ACTION::BREAK;
params.onTabInIndentation = ACTION::THROW;
// insert a potential simple key
InsertPotentialSimpleKey();
@ -322,7 +324,7 @@ void Scanner::ScanPlainScalar() {
// if(Exp::IllegalCharInScalar.Matches(INPUT))
// throw ParserException(INPUT.mark(), ErrorMsg::CHAR_IN_SCALAR);
Token token(Token::PLAIN_SCALAR, mark);
Token token(Token::TYPE::PLAIN_SCALAR, mark);
token.value = scalar;
m_tokens.push(token);
}
@ -343,11 +345,11 @@ void Scanner::ScanQuotedScalar() {
params.eatEnd = true;
params.escape = (single ? '\'' : '\\');
params.indent = 0;
params.fold = FOLD_FLOW;
params.fold = FOLD::FOLD_FLOW;
params.eatLeadingWhitespace = true;
params.trimTrailingSpaces = false;
params.chomp = CLIP;
params.onDocIndicator = THROW;
params.chomp = CHOMP::CLIP;
params.onDocIndicator = ACTION::THROW;
// insert a potential simple key
InsertPotentialSimpleKey();
@ -362,7 +364,7 @@ void Scanner::ScanQuotedScalar() {
m_simpleKeyAllowed = false;
m_canBeJSONFlow = true;
Token token(Token::NON_PLAIN_SCALAR, mark);
Token token(Token::TYPE::NON_PLAIN_SCALAR, mark);
token.value = scalar;
m_tokens.push(token);
}
@ -382,17 +384,17 @@ void Scanner::ScanBlockScalar() {
// eat block indicator ('|' or '>')
Mark mark = INPUT.mark();
char indicator = INPUT.get();
params.fold = (indicator == Keys::FoldedScalar ? FOLD_BLOCK : DONT_FOLD);
params.fold = (indicator == Keys::FoldedScalar ? FOLD::FOLD_BLOCK : FOLD::DONT_FOLD);
// eat chomping/indentation indicators
params.chomp = CLIP;
params.chomp = CHOMP::CLIP;
int n = Exp::Chomp().Match(INPUT);
for (int i = 0; i < n; i++) {
char ch = INPUT.get();
if (ch == '+')
params.chomp = KEEP;
params.chomp = CHOMP::KEEP;
else if (ch == '-')
params.chomp = STRIP;
params.chomp = CHOMP::STRIP;
else if (Exp::Digit().Matches(ch)) {
if (ch == '0')
throw ParserException(INPUT.mark(), ErrorMsg::ZERO_INDENT_IN_BLOCK);
@ -421,7 +423,7 @@ void Scanner::ScanBlockScalar() {
params.eatLeadingWhitespace = false;
params.trimTrailingSpaces = false;
params.onTabInIndentation = THROW;
params.onTabInIndentation = ACTION::THROW;
scalar = ScanScalar(INPUT, params);
@ -430,7 +432,7 @@ void Scanner::ScanBlockScalar() {
m_simpleKeyAllowed = true;
m_canBeJSONFlow = false;
Token token(Token::NON_PLAIN_SCALAR, mark);
Token token(Token::TYPE::NON_PLAIN_SCALAR, mark);
token.value = scalar;
m_tokens.push(token);
}

View File

@ -16,20 +16,20 @@ void Scanner::SimpleKey::Validate() {
// we "garbage collect" them so we can
// always refer to them
if (pIndent)
pIndent->status = IndentMarker::VALID;
pIndent->status = IndentMarker::STATUS::VALID;
if (pMapStart)
pMapStart->status = Token::VALID;
pMapStart->status = Token::STATUS::VALID;
if (pKey)
pKey->status = Token::VALID;
pKey->status = Token::STATUS::VALID;
}
void Scanner::SimpleKey::Invalidate() {
if (pIndent)
pIndent->status = IndentMarker::INVALID;
pIndent->status = IndentMarker::STATUS::INVALID;
if (pMapStart)
pMapStart->status = Token::INVALID;
pMapStart->status = Token::STATUS::INVALID;
if (pKey)
pKey->status = Token::INVALID;
pKey->status = Token::STATUS::INVALID;
}
// CanInsertPotentialSimpleKey
@ -63,18 +63,18 @@ void Scanner::InsertPotentialSimpleKey() {
// first add a map start, if necessary
if (InBlockContext()) {
key.pIndent = PushIndentTo(INPUT.column(), IndentMarker::MAP);
key.pIndent = PushIndentTo(INPUT.column(), IndentMarker::INDENT_TYPE::MAP);
if (key.pIndent) {
key.pIndent->status = IndentMarker::UNKNOWN;
key.pIndent->status = IndentMarker::STATUS::UNKNOWN;
key.pMapStart = key.pIndent->pStartToken;
key.pMapStart->status = Token::UNVERIFIED;
key.pMapStart->status = Token::STATUS::UNVERIFIED;
}
}
// then add the (now unverified) key
m_tokens.push(Token(Token::KEY, INPUT.mark()));
m_tokens.push(Token(Token::TYPE::KEY, INPUT.mark()));
key.pKey = &m_tokens.back();
key.pKey->status = Token::UNVERIFIED;
key.pKey->status = Token::STATUS::UNVERIFIED;
m_simpleKeys.push(key);
}

View File

@ -34,7 +34,7 @@ void SingleDocParser::HandleDocument(EventHandler& eventHandler) {
eventHandler.OnDocumentStart(m_scanner.peek().mark);
// eat doc start
if (m_scanner.peek().type == Token::DOC_START)
if (m_scanner.peek().type == Token::TYPE::DOC_START)
m_scanner.pop();
// recurse!
@ -43,7 +43,7 @@ void SingleDocParser::HandleDocument(EventHandler& eventHandler) {
eventHandler.OnDocumentEnd();
// and finally eat any doc ends we see
while (!m_scanner.empty() && m_scanner.peek().type == Token::DOC_END)
while (!m_scanner.empty() && m_scanner.peek().type == Token::TYPE::DOC_END)
m_scanner.pop();
}
@ -60,7 +60,7 @@ void SingleDocParser::HandleNode(EventHandler& eventHandler) {
Mark mark = m_scanner.peek().mark;
// special case: a value node by itself must be a map, with no header
if (m_scanner.peek().type == Token::VALUE) {
if (m_scanner.peek().type == Token::TYPE::VALUE) {
eventHandler.OnMapStart(mark, "?", NullAnchor, EmitterStyle::Default);
HandleMap(eventHandler);
eventHandler.OnMapEnd();
@ -68,7 +68,7 @@ void SingleDocParser::HandleNode(EventHandler& eventHandler) {
}
// special case: an alias node
if (m_scanner.peek().type == Token::ALIAS) {
if (m_scanner.peek().type == Token::TYPE::ALIAS) {
eventHandler.OnAlias(mark, LookupAnchor(mark, m_scanner.peek().value));
m_scanner.pop();
return;
@ -92,9 +92,9 @@ void SingleDocParser::HandleNode(EventHandler& eventHandler) {
// add non-specific tags
if (tag.empty())
tag = (token.type == Token::NON_PLAIN_SCALAR ? "!" : "?");
tag = (token.type == Token::TYPE::NON_PLAIN_SCALAR ? "!" : "?");
if (token.type == Token::PLAIN_SCALAR
if (token.type == Token::TYPE::PLAIN_SCALAR
&& tag.compare("?") == 0 && IsNullString(token.value)) {
eventHandler.OnNull(mark, anchor);
m_scanner.pop();
@ -103,32 +103,32 @@ void SingleDocParser::HandleNode(EventHandler& eventHandler) {
// now split based on what kind of node we should be
switch (token.type) {
case Token::PLAIN_SCALAR:
case Token::NON_PLAIN_SCALAR:
case Token::TYPE::PLAIN_SCALAR:
case Token::TYPE::NON_PLAIN_SCALAR:
eventHandler.OnScalar(mark, tag, anchor, token.value);
m_scanner.pop();
return;
case Token::FLOW_SEQ_START:
case Token::TYPE::FLOW_SEQ_START:
eventHandler.OnSequenceStart(mark, tag, anchor, EmitterStyle::Flow);
HandleSequence(eventHandler);
eventHandler.OnSequenceEnd();
return;
case Token::BLOCK_SEQ_START:
case Token::TYPE::BLOCK_SEQ_START:
eventHandler.OnSequenceStart(mark, tag, anchor, EmitterStyle::Block);
HandleSequence(eventHandler);
eventHandler.OnSequenceEnd();
return;
case Token::FLOW_MAP_START:
case Token::TYPE::FLOW_MAP_START:
eventHandler.OnMapStart(mark, tag, anchor, EmitterStyle::Flow);
HandleMap(eventHandler);
eventHandler.OnMapEnd();
return;
case Token::BLOCK_MAP_START:
case Token::TYPE::BLOCK_MAP_START:
eventHandler.OnMapStart(mark, tag, anchor, EmitterStyle::Block);
HandleMap(eventHandler);
eventHandler.OnMapEnd();
return;
case Token::KEY:
case Token::TYPE::KEY:
// compact maps can only go in a flow sequence
if (m_pCollectionStack->GetCurCollectionType() ==
CollectionType::FlowSeq) {
@ -151,10 +151,10 @@ void SingleDocParser::HandleNode(EventHandler& eventHandler) {
void SingleDocParser::HandleSequence(EventHandler& eventHandler) {
// split based on start token
switch (m_scanner.peek().type) {
case Token::BLOCK_SEQ_START:
case Token::TYPE::BLOCK_SEQ_START:
HandleBlockSequence(eventHandler);
break;
case Token::FLOW_SEQ_START:
case Token::TYPE::FLOW_SEQ_START:
HandleFlowSequence(eventHandler);
break;
default:
@ -172,18 +172,18 @@ void SingleDocParser::HandleBlockSequence(EventHandler& eventHandler) {
throw ParserException(m_scanner.mark(), ErrorMsg::END_OF_SEQ);
Token token = m_scanner.peek();
if (token.type != Token::BLOCK_ENTRY && token.type != Token::BLOCK_SEQ_END)
if (token.type != Token::TYPE::BLOCK_ENTRY && token.type != Token::TYPE::BLOCK_SEQ_END)
throw ParserException(token.mark, ErrorMsg::END_OF_SEQ);
m_scanner.pop();
if (token.type == Token::BLOCK_SEQ_END)
if (token.type == Token::TYPE::BLOCK_SEQ_END)
break;
// check for null
if (!m_scanner.empty()) {
const Token& nextToken = m_scanner.peek();
if (nextToken.type == Token::BLOCK_ENTRY ||
nextToken.type == Token::BLOCK_SEQ_END) {
if (nextToken.type == Token::TYPE::BLOCK_ENTRY ||
nextToken.type == Token::TYPE::BLOCK_SEQ_END) {
eventHandler.OnNull(nextToken.mark, NullAnchor);
continue;
}
@ -205,7 +205,7 @@ void SingleDocParser::HandleFlowSequence(EventHandler& eventHandler) {
throw ParserException(m_scanner.mark(), ErrorMsg::END_OF_SEQ_FLOW);
// first check for end
if (m_scanner.peek().type == Token::FLOW_SEQ_END) {
if (m_scanner.peek().type == Token::TYPE::FLOW_SEQ_END) {
m_scanner.pop();
break;
}
@ -219,9 +219,9 @@ void SingleDocParser::HandleFlowSequence(EventHandler& eventHandler) {
// now eat the separator (or could be a sequence end, which we ignore - but
// if it's neither, then it's a bad node)
Token& token = m_scanner.peek();
if (token.type == Token::FLOW_ENTRY)
if (token.type == Token::TYPE::FLOW_ENTRY)
m_scanner.pop();
else if (token.type != Token::FLOW_SEQ_END)
else if (token.type != Token::TYPE::FLOW_SEQ_END)
throw ParserException(token.mark, ErrorMsg::END_OF_SEQ_FLOW);
}
@ -231,16 +231,16 @@ void SingleDocParser::HandleFlowSequence(EventHandler& eventHandler) {
void SingleDocParser::HandleMap(EventHandler& eventHandler) {
// split based on start token
switch (m_scanner.peek().type) {
case Token::BLOCK_MAP_START:
case Token::TYPE::BLOCK_MAP_START:
HandleBlockMap(eventHandler);
break;
case Token::FLOW_MAP_START:
case Token::TYPE::FLOW_MAP_START:
HandleFlowMap(eventHandler);
break;
case Token::KEY:
case Token::TYPE::KEY:
HandleCompactMap(eventHandler);
break;
case Token::VALUE:
case Token::TYPE::VALUE:
HandleCompactMapWithNoKey(eventHandler);
break;
default:
@ -258,17 +258,17 @@ void SingleDocParser::HandleBlockMap(EventHandler& eventHandler) {
throw ParserException(m_scanner.mark(), ErrorMsg::END_OF_MAP);
Token token = m_scanner.peek();
if (token.type != Token::KEY && token.type != Token::VALUE &&
token.type != Token::BLOCK_MAP_END)
if (token.type != Token::TYPE::KEY && token.type != Token::TYPE::VALUE &&
token.type != Token::TYPE::BLOCK_MAP_END)
throw ParserException(token.mark, ErrorMsg::END_OF_MAP);
if (token.type == Token::BLOCK_MAP_END) {
if (token.type == Token::TYPE::BLOCK_MAP_END) {
m_scanner.pop();
break;
}
// grab key (if non-null)
if (token.type == Token::KEY) {
if (token.type == Token::TYPE::KEY) {
m_scanner.pop();
HandleNode(eventHandler);
} else {
@ -276,7 +276,7 @@ void SingleDocParser::HandleBlockMap(EventHandler& eventHandler) {
}
// now grab value (optional)
if (!m_scanner.empty() && m_scanner.peek().type == Token::VALUE) {
if (!m_scanner.empty() && m_scanner.peek().type == Token::TYPE::VALUE) {
m_scanner.pop();
HandleNode(eventHandler);
} else {
@ -299,13 +299,13 @@ void SingleDocParser::HandleFlowMap(EventHandler& eventHandler) {
Token& token = m_scanner.peek();
const Mark mark = token.mark;
// first check for end
if (token.type == Token::FLOW_MAP_END) {
if (token.type == Token::TYPE::FLOW_MAP_END) {
m_scanner.pop();
break;
}
// grab key (if non-null)
if (token.type == Token::KEY) {
if (token.type == Token::TYPE::KEY) {
m_scanner.pop();
HandleNode(eventHandler);
} else {
@ -313,7 +313,7 @@ void SingleDocParser::HandleFlowMap(EventHandler& eventHandler) {
}
// now grab value (optional)
if (!m_scanner.empty() && m_scanner.peek().type == Token::VALUE) {
if (!m_scanner.empty() && m_scanner.peek().type == Token::TYPE::VALUE) {
m_scanner.pop();
HandleNode(eventHandler);
} else {
@ -326,9 +326,9 @@ void SingleDocParser::HandleFlowMap(EventHandler& eventHandler) {
// now eat the separator (or could be a map end, which we ignore - but if
// it's neither, then it's a bad node)
Token& nextToken = m_scanner.peek();
if (nextToken.type == Token::FLOW_ENTRY)
if (nextToken.type == Token::TYPE::FLOW_ENTRY)
m_scanner.pop();
else if (nextToken.type != Token::FLOW_MAP_END)
else if (nextToken.type != Token::TYPE::FLOW_MAP_END)
throw ParserException(nextToken.mark, ErrorMsg::END_OF_MAP_FLOW);
}
@ -345,7 +345,7 @@ void SingleDocParser::HandleCompactMap(EventHandler& eventHandler) {
HandleNode(eventHandler);
// now grab value (optional)
if (!m_scanner.empty() && m_scanner.peek().type == Token::VALUE) {
if (!m_scanner.empty() && m_scanner.peek().type == Token::TYPE::VALUE) {
m_scanner.pop();
HandleNode(eventHandler);
} else {
@ -382,10 +382,10 @@ void SingleDocParser::ParseProperties(std::string& tag, anchor_t& anchor,
return;
switch (m_scanner.peek().type) {
case Token::TAG:
case Token::TYPE::TAG:
ParseTag(tag);
break;
case Token::ANCHOR:
case Token::TYPE::ANCHOR:
ParseAnchor(anchor, anchor_name);
break;
default:

View File

@ -218,22 +218,22 @@ Stream::Stream(std::istream& input)
switch (state) {
case uis_utf8:
m_charSet = utf8;
m_charSet = CharacterSet::utf8;
break;
case uis_utf16le:
m_charSet = utf16le;
m_charSet = CharacterSet::utf16le;
break;
case uis_utf16be:
m_charSet = utf16be;
m_charSet = CharacterSet::utf16be;
break;
case uis_utf32le:
m_charSet = utf32le;
m_charSet = CharacterSet::utf32le;
break;
case uis_utf32be:
m_charSet = utf32be;
m_charSet = CharacterSet::utf32be;
break;
default:
m_charSet = utf8;
m_charSet = CharacterSet::utf8;
break;
}
@ -301,19 +301,19 @@ void Stream::AdvanceCurrent() {
bool Stream::_ReadAheadTo(size_t i) const {
while (m_input.good() && (m_readahead.size() <= i)) {
switch (m_charSet) {
case utf8:
case CharacterSet::utf8:
StreamInUtf8();
break;
case utf16le:
case CharacterSet::utf16le:
StreamInUtf16();
break;
case utf16be:
case CharacterSet::utf16be:
StreamInUtf16();
break;
case utf32le:
case CharacterSet::utf32le:
StreamInUtf32();
break;
case utf32be:
case CharacterSet::utf32be:
StreamInUtf32();
break;
}
@ -336,7 +336,7 @@ void Stream::StreamInUtf8() const {
void Stream::StreamInUtf16() const {
unsigned long ch = 0;
unsigned char bytes[2];
int nBigEnd = (m_charSet == utf16be) ? 0 : 1;
int nBigEnd = (m_charSet == CharacterSet::utf16be) ? 0 : 1;
bytes[0] = GetNextByte();
bytes[1] = GetNextByte();
@ -426,7 +426,7 @@ void Stream::StreamInUtf32() const {
unsigned long ch = 0;
unsigned char bytes[4];
int* pIndexes = (m_charSet == utf32be) ? indexes[1] : indexes[0];
int* pIndexes = (m_charSet == CharacterSet::utf32be) ? indexes[1] : indexes[0];
bytes[0] = GetNextByte();
bytes[1] = GetNextByte();

View File

@ -47,7 +47,7 @@ class Stream {
void ResetColumn() { m_mark.column = 0; }
private:
enum CharacterSet { utf8, utf16le, utf16be, utf32le, utf32be };
enum class CharacterSet { utf8, utf16le, utf16be, utf32le, utf32be };
std::istream& m_input;
Mark m_mark;

View File

@ -9,20 +9,20 @@ namespace YAML {
Tag::Tag(const Token& token)
: type(static_cast<TYPE>(token.data)), handle{}, value{} {
switch (type) {
case VERBATIM:
case TYPE::VERBATIM:
value = token.value;
break;
case PRIMARY_HANDLE:
case TYPE::PRIMARY_HANDLE:
value = token.value;
break;
case SECONDARY_HANDLE:
case TYPE::SECONDARY_HANDLE:
value = token.value;
break;
case NAMED_HANDLE:
case TYPE::NAMED_HANDLE:
handle = token.value;
value = token.params[0];
break;
case NON_SPECIFIC:
case TYPE::NON_SPECIFIC:
break;
default:
assert(false);
@ -31,15 +31,15 @@ Tag::Tag(const Token& token)
const std::string Tag::Translate(const Directives& directives) {
switch (type) {
case VERBATIM:
case TYPE::VERBATIM:
return value;
case PRIMARY_HANDLE:
case TYPE::PRIMARY_HANDLE:
return directives.TranslateTagHandle("!") + value;
case SECONDARY_HANDLE:
case TYPE::SECONDARY_HANDLE:
return directives.TranslateTagHandle("!!") + value;
case NAMED_HANDLE:
case TYPE::NAMED_HANDLE:
return directives.TranslateTagHandle("!" + handle + "!") + value;
case NON_SPECIFIC:
case TYPE::NON_SPECIFIC:
// TODO:
return "!";
default:

View File

@ -14,7 +14,7 @@ struct Directives;
struct Token;
struct Tag {
enum TYPE {
enum class TYPE {
VERBATIM,
PRIMARY_HANDLE,
SECONDARY_HANDLE,

View File

@ -22,8 +22,8 @@ const std::string TokenNames[] = {
struct Token {
// enums
enum STATUS { VALID, INVALID, UNVERIFIED };
enum TYPE {
enum class STATUS { VALID, INVALID, UNVERIFIED };
enum class TYPE {
DIRECTIVE,
DOC_START,
DOC_END,
@ -49,10 +49,10 @@ struct Token {
// data
Token(TYPE type_, const Mark& mark_)
: status(VALID), type(type_), mark(mark_), value{}, params{}, data(0) {}
: status(STATUS::VALID), type(type_), mark(mark_), value{}, params{}, data(0) {}
friend std::ostream& operator<<(std::ostream& out, const Token& token) {
out << TokenNames[token.type] << std::string(": ") << token.value;
out << TokenNames[static_cast<int>(token.type)] << std::string(": ") << token.value;
for (const std::string& param : token.params)
out << std::string(" ") << param;
return out;

View File

@ -165,7 +165,7 @@ TEST(RegExTest, OperatorPlus) {
TEST(RegExTest, StringOr) {
std::string str = "abcde";
RegEx ex = RegEx(str, YAML::REGEX_OR);
RegEx ex = RegEx(str, YAML::REGEX_OP::REGEX_OR);
for (size_t i = 0; i < str.size(); ++i) {
EXPECT_TRUE(ex.Matches(str.substr(i, 1)));