Archived
1
0

polish polish polish ukraine

This commit is contained in:
Marrub 2015-06-05 13:38:34 -04:00
parent 47269c316a
commit 59407ec0a5
2 changed files with 108 additions and 100 deletions

113
src/lt.c
View File

@ -23,26 +23,33 @@ THE SOFTWARE.
#include "lt.h" #include "lt.h"
LT_GarbageList *gbHead, *gbRover; /*
* Variables
*/
static LT_GarbageList *gbHead, *gbRover;
static FILE *parseFile; static FILE *parseFile;
static LT_InitInfo info; static LT_InitInfo info;
static iconv_t icDesc; static iconv_t icDesc;
static bool assertError = false; static bool assertError = false;
static const char *assertString; static const char *assertString;
static char *tokenTypes[] = { const char *LT_TkNames[] = {
// [marrub] So, this was an interesting bug. This was completely misordered from the enum. // [marrub] So, this was an interesting bug. This was completely misordered from the enum.
// As can be guessed, this caused many issues. Seriously, all of them. // As can be guessed, this caused many issues. Seriously, all of them.
"TOK_Colon", "TOK_Comma", "TOK_Div", "TOK_Mod", "TOK_Mul", "TOK_Query", "TOK_Colon", "TOK_Comma", "TOK_Div", "TOK_Mod", "TOK_Mul", "TOK_Query",
"TOK_BraceO", "TOK_BraceC", "TOK_BrackO", "TOK_BrackC", "TOK_ParenO", "TOK_ParenC", "TOK_BraceO", "TOK_BraceC", "TOK_BrackO", "TOK_BrackC", "TOK_ParenO", "TOK_ParenC",
"TOK_LnEnd", "TOK_Add2", "TOK_Add", "TOK_And2", "TOK_And", "TOK_CmpGE", "TOK_LnEnd", "TOK_Add2", "TOK_Add", "TOK_And2", "TOK_And", "TOK_CmpGE",
"TOK_ShR", "TOK_CmpGT", "TOK_CmpLE", "TOK_ShL", "TOK_CmpNE", "TOK_CmpLT", "TOK_ShR", "TOK_CmpGT", "TOK_CmpLE", "TOK_ShL", "TOK_CmpNE", "TOK_CmpLT",
"TOK_CmpEQ", "TOK_Equal", "TOK_Not", "TOK_OrI2", "TOK_OrI", "TOK_OrX2", "TOK_CmpEQ", "TOK_Equal", "TOK_Not", "TOK_OrI2", "TOK_OrI", "TOK_OrX2",
"TOK_OrX", "TOK_Sub2", "TOK_Sub", "TOK_String", "TOK_Charac", "TOK_Number", "TOK_OrX", "TOK_Sub2", "TOK_Sub", "TOK_String", "TOK_Charac", "TOK_Number",
"TOK_Identi", "TOK_EOF", "TOK_ChrSeq" "TOK_Identi", "TOK_EOF", "TOK_ChrSeq"
}; };
/*
* Functions
*/
static void LT_DoConvert(char **str) static void LT_DoConvert(char **str)
{ {
size_t i = strlen(*str); size_t i = strlen(*str);
@ -68,7 +75,7 @@ void LT_Init(LT_InitInfo initInfo)
if(icDesc == (iconv_t) -1) if(icDesc == (iconv_t) -1)
{ {
LT_Assert(true, "failure opening iconv"); LT_Assert(true, "LT_Init: Failure opening iconv");
} }
} }
@ -113,7 +120,9 @@ bool LT_Assert(bool assertion, const char *str)
{ {
assertError = true; assertError = true;
assertString = str; assertString = str;
fprintf(stderr, "LT_Assert: %s", str);
// [marrub] Apparently LOVE does not like printf
// fprintf(stderr, "LT_Assert: %s", str);
} }
return assertion; return assertion;
@ -133,7 +142,12 @@ bool LT_OpenFile(const char *filePath)
if(parseFile == NULL) if(parseFile == NULL)
{ {
perror("LT_OpenFile"); char *errorStr = malloc(256);
snprintf(errorStr, 256, "LT_OpenFile: %s", strerror(errno));
LT_Assert(true, errorStr);
free(errorStr);
return false; return false;
} }
@ -195,7 +209,6 @@ char *LT_ReadString(char term)
{ {
size_t i = 0, str_blocks = 1; size_t i = 0, str_blocks = 1;
char c, *str = malloc(TOKEN_STR_BLOCK_LENGTH); char c, *str = malloc(TOKEN_STR_BLOCK_LENGTH);
static char *emptyString = "";
while(true) while(true)
{ {
@ -206,16 +219,16 @@ char *LT_ReadString(char term)
break; break;
} }
if(LT_Assert(feof(parseFile) || c == '\n', "unterminated string literal")) if(LT_Assert(feof(parseFile) || c == '\n', "LT_ReadString: Unterminated string literal"))
{ {
return emptyString; return "";
} }
if(c == '\\' && info.escapeChars) if(c == '\\' && info.escapeChars)
{ {
fread(&c, 1, 1, parseFile); fread(&c, 1, 1, parseFile);
if(LT_Assert(feof(parseFile) || c == '\n', "unterminated string literal")) if(LT_Assert(feof(parseFile) || c == '\n', "LT_ReadString: Unterminated string literal"))
{ {
str[i] = '\0'; str[i] = '\0';
return str; return str;
@ -338,7 +351,9 @@ char *LT_Escaper(char *str, size_t pos, char escape)
} }
break; break;
default: LT_Assert(true, "unknown escape character"); break; default:
LT_Assert(true, "LT_Escaper: Unknown escape character");
break;
} }
return str; return str;
@ -353,7 +368,7 @@ LT_Token LT_GetToken()
if(feof(parseFile)) if(feof(parseFile))
{ {
tk.token = tokenTypes[TOK_EOF]; tk.token = LT_TkNames[TOK_EOF];
tk.string = NULL; tk.string = NULL;
tk.pos = ftell(parseFile); tk.pos = ftell(parseFile);
return tk; return tk;
@ -365,7 +380,7 @@ LT_Token LT_GetToken()
if(feof(parseFile)) // [marrub] This could have caused issues if there was whitespace before EOF. if(feof(parseFile)) // [marrub] This could have caused issues if there was whitespace before EOF.
{ {
tk.token = tokenTypes[TOK_EOF]; tk.token = LT_TkNames[TOK_EOF];
tk.string = NULL; tk.string = NULL;
tk.pos = ftell(parseFile); tk.pos = ftell(parseFile);
return tk; return tk;
@ -376,19 +391,19 @@ LT_Token LT_GetToken()
switch(c) switch(c)
{ {
case ':': tk.token = tokenTypes[TOK_Colon]; return tk; case ':': tk.token = LT_TkNames[TOK_Colon]; return tk;
case ',': tk.token = tokenTypes[TOK_Comma]; return tk; case ',': tk.token = LT_TkNames[TOK_Comma]; return tk;
case '/': tk.token = tokenTypes[TOK_Div]; return tk; case '/': tk.token = LT_TkNames[TOK_Div]; return tk;
case '%': tk.token = tokenTypes[TOK_Mod]; return tk; case '%': tk.token = LT_TkNames[TOK_Mod]; return tk;
case '*': tk.token = tokenTypes[TOK_Mul]; return tk; case '*': tk.token = LT_TkNames[TOK_Mul]; return tk;
case '?': tk.token = tokenTypes[TOK_Query]; return tk; case '?': tk.token = LT_TkNames[TOK_Query]; return tk;
case '{': tk.token = tokenTypes[TOK_BraceO]; return tk; case '{': tk.token = LT_TkNames[TOK_BraceO]; return tk;
case '}': tk.token = tokenTypes[TOK_BraceC]; return tk; case '}': tk.token = LT_TkNames[TOK_BraceC]; return tk;
case '[': tk.token = tokenTypes[TOK_BrackO]; return tk; case '[': tk.token = LT_TkNames[TOK_BrackO]; return tk;
case ']': tk.token = tokenTypes[TOK_BrackC]; return tk; case ']': tk.token = LT_TkNames[TOK_BrackC]; return tk;
case '(': tk.token = tokenTypes[TOK_ParenO]; return tk; case '(': tk.token = LT_TkNames[TOK_ParenO]; return tk;
case ')': tk.token = tokenTypes[TOK_ParenC]; return tk; case ')': tk.token = LT_TkNames[TOK_ParenC]; return tk;
case '\n': tk.token = tokenTypes[TOK_LnEnd]; return tk; case '\n': tk.token = LT_TkNames[TOK_LnEnd]; return tk;
// [marrub] Sorry, I wouldn't normally do a quick and dirty hack like this, // [marrub] Sorry, I wouldn't normally do a quick and dirty hack like this,
// but sometimes I really do care about my sanity. And wrists. // but sometimes I really do care about my sanity. And wrists.
@ -398,11 +413,11 @@ LT_Token LT_GetToken()
\ \
if(c == ch) \ if(c == ch) \
{ \ { \
tk.token = tokenTypes[t2]; \ tk.token = LT_TkNames[t2]; \
} \ } \
else \ else \
{ \ { \
tk.token = tokenTypes[t1]; \ tk.token = LT_TkNames[t1]; \
fseek(parseFile, -1, SEEK_CUR); \ fseek(parseFile, -1, SEEK_CUR); \
} \ } \
\ \
@ -423,15 +438,15 @@ LT_Token LT_GetToken()
if(c == '=') if(c == '=')
{ {
tk.token = tokenTypes[TOK_CmpGE]; tk.token = LT_TkNames[TOK_CmpGE];
} }
else if(c == '>') else if(c == '>')
{ {
tk.token = tokenTypes[TOK_ShR]; tk.token = LT_TkNames[TOK_ShR];
} }
else else
{ {
tk.token = tokenTypes[TOK_CmpGT]; tk.token = LT_TkNames[TOK_CmpGT];
fseek(parseFile, -1, SEEK_CUR); fseek(parseFile, -1, SEEK_CUR);
} }
@ -441,19 +456,19 @@ LT_Token LT_GetToken()
if(c == '=') if(c == '=')
{ {
tk.token = tokenTypes[TOK_CmpLE]; tk.token = LT_TkNames[TOK_CmpLE];
} }
else if(c == '<') else if(c == '<')
{ {
tk.token = tokenTypes[TOK_ShL]; tk.token = LT_TkNames[TOK_ShL];
} }
else if(c == '>') else if(c == '>')
{ {
tk.token = tokenTypes[TOK_CmpNE]; tk.token = LT_TkNames[TOK_CmpNE];
} }
else else
{ {
tk.token = tokenTypes[TOK_CmpLT]; tk.token = LT_TkNames[TOK_CmpLT];
fseek(parseFile, -1, SEEK_CUR); fseek(parseFile, -1, SEEK_CUR);
} }
@ -463,11 +478,11 @@ LT_Token LT_GetToken()
if(c == '=') if(c == '=')
{ {
tk.token = tokenTypes[TOK_CmpNE]; tk.token = LT_TkNames[TOK_CmpNE];
} }
else else
{ {
tk.token = tokenTypes[TOK_Not]; tk.token = LT_TkNames[TOK_Not];
fseek(parseFile, -1, SEEK_CUR); fseek(parseFile, -1, SEEK_CUR);
} }
@ -477,12 +492,12 @@ LT_Token LT_GetToken()
if(c == '=') if(c == '=')
{ {
tk.token = tokenTypes[TOK_CmpNE]; tk.token = LT_TkNames[TOK_CmpNE];
} }
else else
{ {
fseek(parseFile, -1, SEEK_CUR); fseek(parseFile, -1, SEEK_CUR);
LT_Assert(true, "syntax error"); // [marrub] Yet more error checking that was forgotten before. LT_Assert(true, "LT_GetToken: Syntax error"); // [marrub] Yet more error checking that was forgotten before.
} }
return tk; return tk;
@ -491,11 +506,11 @@ LT_Token LT_GetToken()
if(c == '"') if(c == '"')
{ {
tk.token = tokenTypes[TOK_String]; tk.token = LT_TkNames[TOK_String];
} }
else else
{ {
tk.token = tokenTypes[TOK_Charac]; tk.token = LT_TkNames[TOK_Charac];
} }
return tk; return tk;
@ -506,7 +521,7 @@ LT_Token LT_GetToken()
fseek(parseFile, -1, SEEK_CUR); fseek(parseFile, -1, SEEK_CUR);
tk.string = LT_ReadNumber(); tk.string = LT_ReadNumber();
tk.token = tokenTypes[TOK_Number]; tk.token = LT_TkNames[TOK_Number];
return tk; return tk;
} }
@ -547,7 +562,7 @@ LT_Token LT_GetToken()
fseek(parseFile, -1, SEEK_CUR); fseek(parseFile, -1, SEEK_CUR);
tk.string = gbRover->ptr; tk.string = gbRover->ptr;
tk.token = tokenTypes[TOK_Identi]; tk.token = LT_TkNames[TOK_Identi];
return tk; return tk;
} }
@ -560,7 +575,7 @@ LT_Token LT_GetToken()
gbRover->ptr = tk.string; gbRover->ptr = tk.string;
gbRover->next = NULL; gbRover->next = NULL;
tk.token = tokenTypes[TOK_ChrSeq]; tk.token = LT_TkNames[TOK_ChrSeq];
return tk; return tk;
} }

View File

@ -24,20 +24,47 @@ THE SOFTWARE.
#ifndef LOVETOKEN_LT_H #ifndef LOVETOKEN_LT_H
#define LOVETOKEN_LT_H #define LOVETOKEN_LT_H
/*
* Includes
*/
#include <stdio.h> #include <stdio.h>
#include <ctype.h> #include <ctype.h>
#include <stdlib.h> #include <stdlib.h>
#include <stdbool.h> #include <stdbool.h>
#include <string.h> #include <string.h>
#include <errno.h>
#include <iconv.h> #include <iconv.h>
/*
* Definitions
*/
// [marrub] This can be changed if you have either a lot of very
// long strings, or a lot of very small strings, for optimization.
#define TOKEN_STR_BLOCK_LENGTH 512 #define TOKEN_STR_BLOCK_LENGTH 512
// [marrub] When using in FFI, remove this from the definitions // [marrub] When using in FFI, remove this from the declarations.
// Also redefine this for cross-platform. // Also make sure to redefine this for cross-platform.
#define LT_EXPORT __declspec(dllexport) #define LT_EXPORT __declspec(dllexport)
/*
 * Token type identifiers.
 *
 * NOTE: the order of these enumerators MUST stay in sync with the
 * LT_TkNames string table in lt.c — a past mis-ordering between the
 * two caused widespread lexer bugs.
 */
enum
{
	/* single-character punctuation */
	TOK_Colon,  TOK_Comma,  TOK_Div,    TOK_Mod,    TOK_Mul,
	TOK_Query,  TOK_BraceO, TOK_BraceC, TOK_BrackO, TOK_BrackC,
	TOK_ParenO, TOK_ParenC, TOK_LnEnd,
	/* one- and two-character operators */
	TOK_Add2,   TOK_Add,    TOK_And2,   TOK_And,    TOK_CmpGE,
	TOK_ShR,    TOK_CmpGT,  TOK_CmpLE,  TOK_ShL,    TOK_CmpNE,
	TOK_CmpLT,  TOK_CmpEQ,  TOK_Equal,  TOK_Not,    TOK_OrI2,
	TOK_OrI,    TOK_OrX2,   TOK_OrX,    TOK_Sub2,   TOK_Sub,
	/* literals, identifiers, and stream markers */
	TOK_String, TOK_Charac, TOK_Number, TOK_Identi, TOK_EOF,
	TOK_ChrSeq
};
/*
* Types
*/
typedef struct typedef struct
{ {
bool escapeChars; bool escapeChars;
@ -60,6 +87,16 @@ typedef struct
const char *str; const char *str;
} LT_AssertInfo; } LT_AssertInfo;
/*
 * LT_GarbageList — node in a singly-linked list of tracked heap
 * pointers (see gbHead/gbRover in lt.c, which append lexer-allocated
 * strings to it).
 * [marrub] Don't include this into FFI declarations.
 */
typedef struct LT_GarbageList_s
{
	struct LT_GarbageList_s *next; /* next node; NULL at the tail */
	void *ptr;                     /* tracked allocation */
} LT_GarbageList;
/*
* Functions
*/
void LT_EXPORT LT_Init(LT_InitInfo initInfo); void LT_EXPORT LT_Init(LT_InitInfo initInfo);
void LT_EXPORT LT_Quit(); void LT_EXPORT LT_Quit();
bool LT_EXPORT LT_Assert(bool assertion, const char *str); bool LT_EXPORT LT_Assert(bool assertion, const char *str);
@ -73,56 +110,12 @@ char *LT_EXPORT LT_ReadString(char term);
char *LT_EXPORT LT_Escaper(char *str, size_t pos, char escape); char *LT_EXPORT LT_Escaper(char *str, size_t pos, char escape);
LT_Token LT_EXPORT LT_GetToken(); LT_Token LT_EXPORT LT_GetToken();
// [marrub] Don't include stuff below here into the FFI definitions /*
* Variables
* Don't include these into FFI declarations.
*/
typedef struct LT_GarbageList_s extern const char *LT_EXPORT LT_TkNames[];
{
struct LT_GarbageList_s *next;
void *ptr;
} LT_GarbageList;
// Token type identifiers, one per line (removed side of this diff;
// superseded in this commit by the compact five-per-line grouping
// moved to the top of lt.h — enumerator names and values are identical).
enum
{
TOK_Colon,
TOK_Comma,
TOK_Div,
TOK_Mod,
TOK_Mul,
TOK_Query,
TOK_BraceO,
TOK_BraceC,
TOK_BrackO,
TOK_BrackC,
TOK_ParenO,
TOK_ParenC,
TOK_LnEnd,
TOK_Add2,
TOK_Add,
TOK_And2,
TOK_And,
TOK_CmpGE,
TOK_ShR,
TOK_CmpGT,
TOK_CmpLE,
TOK_ShL,
TOK_CmpNE,
TOK_CmpLT,
TOK_CmpEQ,
TOK_Equal,
TOK_Not,
TOK_OrI2,
TOK_OrI,
TOK_OrX2,
TOK_OrX,
TOK_Sub2,
TOK_Sub,
TOK_String,
TOK_Charac,
TOK_Number,
TOK_Identi,
TOK_EOF,
TOK_ChrSeq
};
#endif #endif