Skip to content

Commit

Permalink
feat: parse
Browse files Browse the repository at this point in the history
  • Loading branch information
kj455 committed Aug 12, 2024
1 parent 0e17391 commit 50e36bc
Show file tree
Hide file tree
Showing 9 changed files with 1,050 additions and 2 deletions.
181 changes: 181 additions & 0 deletions pkg/parse/data.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,181 @@
package parse

import (
"fmt"
"strings"

"github.com/kj455/db/pkg/constant"
"github.com/kj455/db/pkg/query"
"github.com/kj455/db/pkg/record"
)

// QueryData holds data for the SQL select statement.
type QueryData struct {
	Fields []string        // selected field (column) names, in select-list order
	Tables []string        // table names from the "from" clause
	Pred   query.Predicate // selection predicate; String() == "" means no "where" clause
}

// NewQueryData creates a new QueryData instance from the selected fields,
// the source tables, and the selection predicate.
func NewQueryData(fields []string, tables []string, pred query.Predicate) *QueryData {
	q := new(QueryData)
	q.Fields = fields
	q.Tables = tables
	q.Pred = pred
	return q
}

// String renders the query as a SQL select statement, appending a
// "where" clause only when the predicate is non-empty.
func (q *QueryData) String() string {
	var sb strings.Builder
	sb.WriteString("select ")
	sb.WriteString(strings.Join(q.Fields, ", "))
	sb.WriteString(" from ")
	sb.WriteString(strings.Join(q.Tables, ", "))
	if pred := q.Pred.String(); pred != "" {
		sb.WriteString(" where ")
		sb.WriteString(pred)
	}
	return sb.String()
}

// InsertData is the data for the SQL "insert" statement.
type InsertData struct {
	Table  string            // target table name
	Fields []string          // column names listed in the statement
	Vals   []*constant.Const // values to insert, positionally matching Fields
}

// NewInsertData creates an InsertData for inserting vals into the named
// fields of table.
func NewInsertData(table string, fields []string, vals []*constant.Const) *InsertData {
	d := new(InsertData)
	d.Table = table
	d.Fields = fields
	d.Vals = vals
	return d
}

// String renders the statement as "insert into t(f1, f2) values(v1, v2)".
func (i *InsertData) String() string {
	vals := make([]string, 0, len(i.Vals))
	for _, v := range i.Vals {
		vals = append(vals, v.ToString())
	}
	return fmt.Sprintf("insert into %s(%s) values(%s)",
		i.Table, strings.Join(i.Fields, ", "), strings.Join(vals, ", "))
}

// ModifyData is the data for the SQL "update" statement.
type ModifyData struct {
	Table string           // table being updated
	Field string           // the single column assigned by the "set" clause
	Expr  query.Expression // expression assigned to Field
	Pred  query.Predicate  // row filter; String() == "" means no "where" clause
}

// NewModifyData creates a ModifyData assigning expr to field in table for
// the rows matched by pred.
func NewModifyData(table, field string, expr query.Expression, pred query.Predicate) *ModifyData {
	m := new(ModifyData)
	m.Table = table
	m.Field = field
	m.Expr = expr
	m.Pred = pred
	return m
}

// String renders the statement as "update t set f = expr", appending a
// "where" clause only when the predicate is non-empty.
func (m *ModifyData) String() string {
	str := fmt.Sprintf("update %s set %s = %s", m.Table, m.Field, m.Expr.ToString())
	if pred := m.Pred.String(); pred != "" {
		str += " where " + pred
	}
	return str
}

// DeleteData is the data for the SQL "delete" statement.
type DeleteData struct {
	Table string          // table rows are deleted from
	Pred  query.Predicate // row filter selecting which rows to delete
}

// NewDeleteData creates a DeleteData deleting the rows of table matched by
// pred.
func NewDeleteData(table string, pred query.Predicate) *DeleteData {
	d := new(DeleteData)
	d.Table = table
	d.Pred = pred
	return d
}

// String renders the statement as "delete from t where pred".
//
// Fix: the original unconditionally emitted " where " even when the
// predicate was empty, producing e.g. "delete from t where ". The clause is
// now omitted for an empty predicate, matching how QueryData.String and
// ModifyData.String handle the same case.
func (d *DeleteData) String() string {
	str := fmt.Sprintf("delete from %s", d.Table)
	pred := d.Pred.String()
	if pred == "" {
		return str
	}
	return fmt.Sprintf("%s where %s", str, pred)
}

// CreateTableData is the data for the SQL "create table" statement.
type CreateTableData struct {
	Table  string        // name of the table to create
	Schema record.Schema // column names, types, and varchar lengths
}

// NewCreateTableData creates a CreateTableData for a table with the given
// name and schema.
func NewCreateTableData(table string, sch record.Schema) *CreateTableData {
	c := new(CreateTableData)
	c.Table = table
	c.Schema = sch
	return c
}

// String renders the schema as a SQL "create table" statement, e.g.
// "create table t(a int, b varchar(10))".
func (c *CreateTableData) String() string {
	fields := c.Schema.Fields()
	defs := make([]string, 0, len(fields))
	for _, field := range fields {
		// Errors from Type/Length are ignored, as before; a field whose
		// type is neither integer nor varchar is emitted with no type suffix.
		typ, _ := c.Schema.Type(field)
		def := field
		switch typ {
		case record.SCHEMA_TYPE_INTEGER:
			def += " int"
		case record.SCHEMA_TYPE_VARCHAR:
			length, _ := c.Schema.Length(field)
			def += fmt.Sprintf(" varchar(%d)", length)
		}
		defs = append(defs, def)
	}
	return fmt.Sprintf("create table %s(%s)", c.Table, strings.Join(defs, ", "))
}

// CreateViewData is the data for the SQL "create view" statement.
type CreateViewData struct {
	ViewName string     // name of the view to create
	data     *QueryData // the select statement defining the view
}

// NewCreateViewData creates a CreateViewData defining viewName by the given
// select-statement data.
func NewCreateViewData(viewName string, data *QueryData) *CreateViewData {
	c := new(CreateViewData)
	c.ViewName = viewName
	c.data = data
	return c
}

// ViewDef returns the view definition: the underlying select statement
// rendered as SQL.
func (c *CreateViewData) ViewDef() string {
	return c.data.String()
}

// String renders the statement as "create view <name> as <select ...>".
func (c *CreateViewData) String() string {
	return "create view " + c.ViewName + " as " + c.ViewDef()
}

// CreateIndexData is the parser for the "create index" statement.
// Idx is the index name, Table the indexed table, Field the indexed column.
type CreateIndexData struct {
	Idx, Table, Field string
}

// NewCreateIndexData creates a CreateIndexData for index idx on
// table.field.
func NewCreateIndexData(idx, table, field string) *CreateIndexData {
	c := new(CreateIndexData)
	c.Idx = idx
	c.Table = table
	c.Field = field
	return c
}

// String renders the statement as "create index i on t(f)".
func (c *CreateIndexData) String() string {
	return "create index " + c.Idx + " on " + c.Table + "(" + c.Field + ")"
}
206 changes: 206 additions & 0 deletions pkg/parse/lexer.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,206 @@
package parse

import (
"bufio"
"errors"
"strconv"
"strings"
)

// TokenType enumerates the categories a lexer token can belong to.
// NOTE(review): the Lexer in this file tags tokens with rune codes
// ('W', 'N', 'S', '.') rather than these constants — confirm whether this
// enum is still meant to be used, or should replace the rune tags.
type TokenType int

const (
	Unknown TokenType = iota // unrecognized token
	EOF                      // end of input
	Word                     // keyword, identifier, or delimiter text
	Number                   // integer constant
	Other                    // anything else
)

var (
	// errBadSyntax is returned by the Eat* methods when the current token
	// is not what the caller's grammar rule expects.
	errBadSyntax = errors.New("parse: bad syntax")
)

// keywords lists every reserved word of the supported SQL subset; a word
// token matching one of these is never treated as an identifier (see
// MatchId).
var keywords = []string{
	"select",
	"from",
	"where",
	"and",
	"insert",
	"into",
	"values",
	"delete",
	"update",
	"set",
	"create",
	"table",
	"int",
	"varchar",
	"view",
	"as",
	"index",
	"on",
}

// Lexer is the lexical analyzer.
// It holds one token of lookahead: the Match* methods inspect the current
// token and the Eat* methods consume it.
type Lexer struct {
	keywords map[string]bool // reserved-word set built by initKeywords
	tok      *bufio.Scanner  // token stream, split by ScanSqlChars
	typ      rune            // current token tag: 'W' word, 'N' number, 'S' string, '.' end of input (set by nextToken)
	sval     string          // current token text (words lowercased; string constants keep their quotes)
	nval     int             // current integer value, valid when typ == 'N'
}

// isSQLDelim reports whether b is a character that ScanSqlChars emits as a
// standalone single-character token.
func isSQLDelim(b byte) bool {
	return b == '(' || b == ')' || b == ',' || b == '='
}

// isSQLSpace reports whether b is insignificant whitespace between tokens.
func isSQLSpace(b byte) bool {
	return b == ' ' || b == '\t' || b == '\n' || b == '\r'
}

// ScanSqlChars is a bufio.SplitFunc that tokenizes a SQL statement.
// It skips whitespace, emits '(' ')' ',' '=' as one-character tokens, and
// otherwise emits the run of characters up to the next whitespace or
// delimiter (consuming a terminating space, but leaving a terminating
// delimiter for the next call).
//
// Fix: the original treated only ' ' as whitespace, so statements
// containing tabs, newlines, or carriage returns produced corrupted tokens;
// all four are now skipped. NOTE(review): single-quoted string constants
// containing spaces are still split at the space — confirm whether
// multi-word string literals need to be supported.
func ScanSqlChars(data []byte, atEOF bool) (advance int, token []byte, err error) {
	start := 0
	for start < len(data) && isSQLSpace(data[start]) {
		start++
	}

	if start >= len(data) {
		// Nothing but whitespace so far: request more data (returning
		// 0, nil, nil at EOF tells the Scanner we are done).
		return
	}

	// Single-character delimiter tokens stand alone.
	if isSQLDelim(data[start]) {
		return start + 1, data[start : start+1], nil
	}

	// Scan to the end of the current word/number token.
	for i := start; i < len(data); i++ {
		if isSQLDelim(data[i]) {
			// Leave the delimiter in the buffer for the next call.
			return i, data[start:i], nil
		}
		if isSQLSpace(data[i]) {
			// Consume the terminating whitespace along with the token.
			return i + 1, data[start:i], nil
		}
	}

	// At EOF with an unterminated trailing token: emit it as-is.
	if atEOF && len(data) > start {
		return len(data), data[start:], nil
	}

	// Token may continue beyond this buffer; request more data.
	return
}

// NewLexer creates a new lexical analyzer for SQL statement s, positioned
// on the first token.
func NewLexer(s string) *Lexer {
	scanner := bufio.NewScanner(strings.NewReader(s))
	scanner.Split(ScanSqlChars)
	l := &Lexer{
		keywords: initKeywords(),
		tok:      scanner,
	}
	l.nextToken()
	return l
}

// MatchDelim returns true if the current token is exactly the specified
// one-character delimiter (e.g. '(', ')', ',', '=').
//
// Fix: the original compared only the first byte of the token, so
// MatchDelim('s') was true while positioned on the word "select", and it
// panicked with an index-out-of-range on an empty token (e.g. before any
// token was scanned from empty input). Requiring a single-character token
// fixes both; the commented-out experimental code was removed.
func (l *Lexer) MatchDelim(d rune) bool {
	return len(l.sval) == 1 && rune(l.sval[0]) == d
}

// matchIntConstant returns true if the current token is an integer.
func (l *Lexer) matchIntConstant() bool {
	return l.typ == 'N' // 'N' is the tag nextToken assigns to integer constants
}

// MatchStringConstant returns true if the current token is a string
// constant, i.e. it begins with a single quote.
//
// Fix: the original indexed l.sval[0] without a length check and panicked
// on an empty token; the guard below preserves the same result for every
// non-empty token.
func (l *Lexer) MatchStringConstant() bool {
	return len(l.sval) > 0 && l.sval[0] == '\''
}

// MatchKeyword returns true if the current token is the specified keyword.
// Callers pass w in lowercase; nextToken lowercases word tokens before
// storing them in sval.
func (l *Lexer) MatchKeyword(w string) bool {
	return l.typ == 'W' && l.sval == w // 'W' is the tag nextToken assigns to words
}

// MatchId returns true if the current token is a legal identifier: a word
// token that is not one of the reserved keywords.
func (l *Lexer) MatchId() bool {
	if l.typ != 'W' {
		return false
	}
	return !l.keywords[l.sval]
}

// EatDelim consumes the current token and advances when it is the
// specified delimiter; otherwise it returns errBadSyntax and does not
// advance.
func (l *Lexer) EatDelim(d rune) error {
	if l.MatchDelim(d) {
		l.nextToken()
		return nil
	}
	return errBadSyntax
}

// EatIntConstant returns the current integer constant and advances to the
// next token; it returns errBadSyntax (and does not advance) when the
// current token is not an integer.
func (l *Lexer) EatIntConstant() (int, error) {
	if !l.matchIntConstant() {
		return 0, errBadSyntax
	}
	defer l.nextToken() // advance after the return value is captured
	return l.nval, nil
}

// EatStringConstant returns the current string constant (still wrapped in
// its single quotes, as stored by nextToken) and advances; it returns
// errBadSyntax (and does not advance) when the current token is not a
// string.
func (l *Lexer) EatStringConstant() (string, error) {
	if !l.MatchStringConstant() {
		return "", errBadSyntax
	}
	defer l.nextToken() // advance after the return value is captured
	return l.sval, nil
}

// EatKeyword consumes the current token and advances when it is the
// specified keyword; otherwise it returns errBadSyntax and does not
// advance.
func (l *Lexer) EatKeyword(w string) error {
	if l.MatchKeyword(w) {
		l.nextToken()
		return nil
	}
	return errBadSyntax
}

// EatId returns the current identifier and advances to the next token; it
// returns errBadSyntax (and does not advance) when the current token is
// not a legal identifier.
func (l *Lexer) EatId() (string, error) {
	if !l.MatchId() {
		return "", errBadSyntax
	}
	defer l.nextToken() // advance after the return value is captured
	return l.sval, nil
}

// nextToken advances the lexer to the next token and records its type tag:
// 'N' for integer constants (value stored in nval), 'S' for single-quoted
// string constants (sval keeps the quotes), 'W' for all other tokens
// (lowercased into sval), and '.' once the input is exhausted.
//
// Fixes: the original assigned l.typ = -1 and then immediately overwrote
// it with '.' (dead code already flagged FIXME); it called strconv.Atoi
// twice per number token; and a lone "'" token satisfied both HasPrefix
// and HasSuffix and was misclassified as a string constant — a string now
// requires at least an opening and a closing quote.
func (l *Lexer) nextToken() {
	if !l.tok.Scan() {
		// End of input (bufio.Scanner folds errors into a failed Scan).
		l.typ = '.'
		return
	}
	token := l.tok.Text()
	if n, err := strconv.Atoi(token); err == nil {
		l.typ = 'N'
		l.nval = n
		return
	}
	if len(token) >= 2 && strings.HasPrefix(token, "'") && strings.HasSuffix(token, "'") {
		l.typ = 'S'
		// NOTE(review): the quotes are intentionally kept in sval (the
		// stripping was commented out upstream) — confirm callers expect
		// the quoted form.
		l.sval = token
		return
	}
	l.typ = 'W'
	l.sval = strings.ToLower(token)
}

// initKeywords builds the reserved-word membership set used by MatchId.
func initKeywords() map[string]bool {
	set := make(map[string]bool, len(keywords))
	for _, kw := range keywords {
		set[kw] = true
	}
	return set
}
Loading

0 comments on commit 50e36bc

Please sign in to comment.