author    Alexander Kavon <me+git@alexkavon.com>  2024-01-22 00:35:44 -0500
committer Alexander Kavon <me+git@alexkavon.com>  2024-01-22 00:35:44 -0500
commit    d6fdb3a460eb228d7b1cd7870b7ef6c8c7391f0b (patch)
tree      f0fdc9963f9da9eae89e34ddbd401f8fc2cdd65c
parent    857a7dd47a42faeee4c91e8089905b2ba7135bb7 (diff)
sqlboiler and generated models
-rw-r--r--  sqlboiler.toml                          11
-rw-r--r--  src/models/boil_main_test.go           119
-rw-r--r--  src/models/boil_queries.go              38
-rw-r--r--  src/models/boil_queries_test.go         51
-rw-r--r--  src/models/boil_relationship_test.go    46
-rw-r--r--  src/models/boil_suites_test.go          81
-rw-r--r--  src/models/boil_table_names.go          10
-rw-r--r--  src/models/boil_types.go                52
-rw-r--r--  src/models/boil_view_names.go            7
-rw-r--r--  src/models/psql_main_test.go           231
-rw-r--r--  src/models/psql_suites_test.go          10
-rw-r--r--  src/models/psql_upsert.go               99
-rw-r--r--  src/models/users.go                   1051
-rw-r--r--  src/models/users_test.go               732
14 files changed, 2538 insertions, 0 deletions
diff --git a/sqlboiler.toml b/sqlboiler.toml
new file mode 100644
index 0000000..b68e3e8
--- /dev/null
+++ b/sqlboiler.toml
@@ -0,0 +1,11 @@
+output = "src/models"
+wipe = true
+
+[psql]
+ dbname = "newsstand"
+ host = "localhost"
+ port = 9002
+ user = "newsstand"
+ pass = "newsstand"
+ sslmode = "disable"
+ blacklist = ["schema_version"]
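
For reference, the [psql] settings above map one-to-one onto a standard lib/pq connection string (the generated test harness builds the same thing via driver.PSQLBuildQueryString). A minimal sketch, assuming the database described by this config is reachable; everything except the Ping check comes straight from sqlboiler.toml:

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // Postgres driver, also used by the generated tests
)

func main() {
	// Same settings as the [psql] section of sqlboiler.toml above.
	dsn := "host=localhost port=9002 user=newsstand password=newsstand dbname=newsstand sslmode=disable"

	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Verify the connection actually works before generating or running tests.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("connected to newsstand")
}
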
diff --git a/src/models/boil_main_test.go b/src/models/boil_main_test.go
new file mode 100644
index 0000000..1ebd97f
--- /dev/null
+++ b/src/models/boil_main_test.go
@@ -0,0 +1,119 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+import (
+ "database/sql"
+ "flag"
+ "fmt"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/spf13/viper"
+ "github.com/volatiletech/sqlboiler/v4/boil"
+)
+
+var flagDebugMode = flag.Bool("test.sqldebug", false, "Turns on debug mode for SQL statements")
+var flagConfigFile = flag.String("test.config", "", "Overrides the default config")
+
+const outputDirDepth = 2
+
+var (
+ dbMain tester
+)
+
+type tester interface {
+ setup() error
+ conn() (*sql.DB, error)
+ teardown() error
+}
+
+func TestMain(m *testing.M) {
+ if dbMain == nil {
+ fmt.Println("no dbMain tester interface was ready")
+ os.Exit(-1)
+ }
+
+ rand.Seed(time.Now().UnixNano())
+
+ flag.Parse()
+
+ var err error
+
+ // Load configuration
+ err = initViper()
+ if err != nil {
+ fmt.Println("unable to load config file")
+ os.Exit(-2)
+ }
+
+ // Set DebugMode so we can see generated sql statements
+ boil.DebugMode = *flagDebugMode
+
+ if err = dbMain.setup(); err != nil {
+ fmt.Println("Unable to execute setup:", err)
+ os.Exit(-4)
+ }
+
+ conn, err := dbMain.conn()
+ if err != nil {
+ fmt.Println("failed to get connection:", err)
+ }
+
+ var code int
+ boil.SetDB(conn)
+ code = m.Run()
+
+ if err = dbMain.teardown(); err != nil {
+ fmt.Println("Unable to execute teardown:", err)
+ os.Exit(-5)
+ }
+
+ os.Exit(code)
+}
+
+func initViper() error {
+ if flagConfigFile != nil && *flagConfigFile != "" {
+ viper.SetConfigFile(*flagConfigFile)
+ if err := viper.ReadInConfig(); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ var err error
+
+ viper.SetConfigName("sqlboiler")
+
+ configHome := os.Getenv("XDG_CONFIG_HOME")
+ homePath := os.Getenv("HOME")
+ wd, err := os.Getwd()
+ if err != nil {
+ wd = strings.Repeat("../", outputDirDepth)
+ } else {
+ wd = wd + strings.Repeat("/..", outputDirDepth)
+ }
+
+ configPaths := []string{wd}
+ if len(configHome) > 0 {
+ configPaths = append(configPaths, filepath.Join(configHome, "sqlboiler"))
+ } else {
+ configPaths = append(configPaths, filepath.Join(homePath, ".config/sqlboiler"))
+ }
+
+ for _, p := range configPaths {
+ viper.AddConfigPath(p)
+ }
+
+ // Ignore errors here, fall back to defaults and validation to provide errs
+ _ = viper.ReadInConfig()
+ viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
+ viper.AutomaticEnv()
+
+ return nil
+}
diff --git a/src/models/boil_queries.go b/src/models/boil_queries.go
new file mode 100644
index 0000000..e8db654
--- /dev/null
+++ b/src/models/boil_queries.go
@@ -0,0 +1,38 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+import (
+ "regexp"
+
+ "github.com/volatiletech/sqlboiler/v4/drivers"
+ "github.com/volatiletech/sqlboiler/v4/queries"
+ "github.com/volatiletech/sqlboiler/v4/queries/qm"
+)
+
+var dialect = drivers.Dialect{
+ LQ: 0x22,
+ RQ: 0x22,
+
+ UseIndexPlaceholders: true,
+ UseLastInsertID: false,
+ UseSchema: false,
+ UseDefaultKeyword: true,
+ UseAutoColumns: false,
+ UseTopClause: false,
+ UseOutputClause: false,
+ UseCaseWhenExistsClause: false,
+}
+
+// This is a dummy variable to prevent unused regexp import error
+var _ = &regexp.Regexp{}
+
+// NewQuery initializes a new Query using the passed in QueryMods
+func NewQuery(mods ...qm.QueryMod) *queries.Query {
+ q := &queries.Query{}
+ queries.SetDialect(q, &dialect)
+ qm.Apply(q, mods...)
+
+ return q
+}
diff --git a/src/models/boil_queries_test.go b/src/models/boil_queries_test.go
new file mode 100644
index 0000000..2d256bd
--- /dev/null
+++ b/src/models/boil_queries_test.go
@@ -0,0 +1,51 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math/rand"
+ "regexp"
+
+ "github.com/volatiletech/sqlboiler/v4/boil"
+)
+
+var dbNameRand *rand.Rand
+
+func MustTx(transactor boil.ContextTransactor, err error) boil.ContextTransactor {
+ if err != nil {
+ panic(fmt.Sprintf("Cannot create a transactor: %s", err))
+ }
+ return transactor
+}
+
+func newFKeyDestroyer(regex *regexp.Regexp, reader io.Reader) io.Reader {
+ return &fKeyDestroyer{
+ reader: reader,
+ rgx: regex,
+ }
+}
+
+type fKeyDestroyer struct {
+ reader io.Reader
+ buf *bytes.Buffer
+ rgx *regexp.Regexp
+}
+
+func (f *fKeyDestroyer) Read(b []byte) (int, error) {
+ if f.buf == nil {
+ all, err := io.ReadAll(f.reader)
+ if err != nil {
+ return 0, err
+ }
+
+ all = bytes.Replace(all, []byte{'\r', '\n'}, []byte{'\n'}, -1)
+ all = f.rgx.ReplaceAll(all, []byte{})
+ f.buf = bytes.NewBuffer(all)
+ }
+
+ return f.buf.Read(b)
+}
diff --git a/src/models/boil_relationship_test.go b/src/models/boil_relationship_test.go
new file mode 100644
index 0000000..c4d831d
--- /dev/null
+++ b/src/models/boil_relationship_test.go
@@ -0,0 +1,46 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+import "testing"
+
+// TestToOne tests cannot be run in parallel
+// or deadlocks can occur.
+func TestToOne(t *testing.T) {}
+
+// TestOneToOne tests cannot be run in parallel
+// or deadlocks can occur.
+func TestOneToOne(t *testing.T) {}
+
+// TestToMany tests cannot be run in parallel
+// or deadlocks can occur.
+func TestToMany(t *testing.T) {}
+
+// TestToOneSet tests cannot be run in parallel
+// or deadlocks can occur.
+func TestToOneSet(t *testing.T) {}
+
+// TestToOneRemove tests cannot be run in parallel
+// or deadlocks can occur.
+func TestToOneRemove(t *testing.T) {}
+
+// TestOneToOneSet tests cannot be run in parallel
+// or deadlocks can occur.
+func TestOneToOneSet(t *testing.T) {}
+
+// TestOneToOneRemove tests cannot be run in parallel
+// or deadlocks can occur.
+func TestOneToOneRemove(t *testing.T) {}
+
+// TestToManyAdd tests cannot be run in parallel
+// or deadlocks can occur.
+func TestToManyAdd(t *testing.T) {}
+
+// TestToManySet tests cannot be run in parallel
+// or deadlocks can occur.
+func TestToManySet(t *testing.T) {}
+
+// TestToManyRemove tests cannot be run in parallel
+// or deadlocks can occur.
+func TestToManyRemove(t *testing.T) {}
diff --git a/src/models/boil_suites_test.go b/src/models/boil_suites_test.go
new file mode 100644
index 0000000..a817084
--- /dev/null
+++ b/src/models/boil_suites_test.go
@@ -0,0 +1,81 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+import "testing"
+
+// This test suite runs each operation test in parallel.
+// Example, if your database has 3 tables, the suite will run:
+// table1, table2 and table3 Delete in parallel
+// table1, table2 and table3 Insert in parallel, and so forth.
+// It does NOT run each operation group in parallel.
+// Separating the tests thusly grants avoidance of Postgres deadlocks.
+func TestParent(t *testing.T) {
+ t.Run("Users", testUsers)
+}
+
+func TestDelete(t *testing.T) {
+ t.Run("Users", testUsersDelete)
+}
+
+func TestQueryDeleteAll(t *testing.T) {
+ t.Run("Users", testUsersQueryDeleteAll)
+}
+
+func TestSliceDeleteAll(t *testing.T) {
+ t.Run("Users", testUsersSliceDeleteAll)
+}
+
+func TestExists(t *testing.T) {
+ t.Run("Users", testUsersExists)
+}
+
+func TestFind(t *testing.T) {
+ t.Run("Users", testUsersFind)
+}
+
+func TestBind(t *testing.T) {
+ t.Run("Users", testUsersBind)
+}
+
+func TestOne(t *testing.T) {
+ t.Run("Users", testUsersOne)
+}
+
+func TestAll(t *testing.T) {
+ t.Run("Users", testUsersAll)
+}
+
+func TestCount(t *testing.T) {
+ t.Run("Users", testUsersCount)
+}
+
+func TestHooks(t *testing.T) {
+ t.Run("Users", testUsersHooks)
+}
+
+func TestInsert(t *testing.T) {
+ t.Run("Users", testUsersInsert)
+ t.Run("Users", testUsersInsertWhitelist)
+}
+
+func TestReload(t *testing.T) {
+ t.Run("Users", testUsersReload)
+}
+
+func TestReloadAll(t *testing.T) {
+ t.Run("Users", testUsersReloadAll)
+}
+
+func TestSelect(t *testing.T) {
+ t.Run("Users", testUsersSelect)
+}
+
+func TestUpdate(t *testing.T) {
+ t.Run("Users", testUsersUpdate)
+}
+
+func TestSliceUpdateAll(t *testing.T) {
+ t.Run("Users", testUsersSliceUpdateAll)
+}
diff --git a/src/models/boil_table_names.go b/src/models/boil_table_names.go
new file mode 100644
index 0000000..b98dc13
--- /dev/null
+++ b/src/models/boil_table_names.go
@@ -0,0 +1,10 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+var TableNames = struct {
+ Users string
+}{
+ Users: "users",
+}
diff --git a/src/models/boil_types.go b/src/models/boil_types.go
new file mode 100644
index 0000000..f1157ed
--- /dev/null
+++ b/src/models/boil_types.go
@@ -0,0 +1,52 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+import (
+ "strconv"
+
+ "github.com/friendsofgo/errors"
+ "github.com/volatiletech/sqlboiler/v4/boil"
+ "github.com/volatiletech/strmangle"
+)
+
+// M type is for providing columns and column values to UpdateAll.
+type M map[string]interface{}
+
+// ErrSyncFail occurs during insert when the record could not be retrieved in
+// order to populate default value information. This usually happens when LastInsertId
+// fails or there was a primary key configuration that was not resolvable.
+var ErrSyncFail = errors.New("models: failed to synchronize data after insert")
+
+type insertCache struct {
+ query string
+ retQuery string
+ valueMapping []uint64
+ retMapping []uint64
+}
+
+type updateCache struct {
+ query string
+ valueMapping []uint64
+}
+
+func makeCacheKey(cols boil.Columns, nzDefaults []string) string {
+ buf := strmangle.GetBuffer()
+
+ buf.WriteString(strconv.Itoa(cols.Kind))
+ for _, w := range cols.Cols {
+ buf.WriteString(w)
+ }
+
+ if len(nzDefaults) != 0 {
+ buf.WriteByte('.')
+ }
+ for _, nz := range nzDefaults {
+ buf.WriteString(nz)
+ }
+
+ str := buf.String()
+ strmangle.PutBuffer(buf)
+ return str
+}
diff --git a/src/models/boil_view_names.go b/src/models/boil_view_names.go
new file mode 100644
index 0000000..d8e2111
--- /dev/null
+++ b/src/models/boil_view_names.go
@@ -0,0 +1,7 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+var ViewNames = struct {
+}{}
diff --git a/src/models/psql_main_test.go b/src/models/psql_main_test.go
new file mode 100644
index 0000000..63c615f
--- /dev/null
+++ b/src/models/psql_main_test.go
@@ -0,0 +1,231 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+import (
+ "bytes"
+ "database/sql"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "regexp"
+ "strings"
+
+ "github.com/friendsofgo/errors"
+ "github.com/kat-co/vala"
+ _ "github.com/lib/pq"
+ "github.com/spf13/viper"
+ "github.com/volatiletech/randomize"
+ "github.com/volatiletech/sqlboiler/v4/drivers/sqlboiler-psql/driver"
+)
+
+var rgxPGFkey = regexp.MustCompile(`(?m)^ALTER TABLE .*\n\s+ADD CONSTRAINT .*? FOREIGN KEY .*?;\n`)
+
+type pgTester struct {
+ dbConn *sql.DB
+
+ dbName string
+ host string
+ user string
+ pass string
+ sslmode string
+ port int
+
+ pgPassFile string
+
+ testDBName string
+ skipSQLCmd bool
+}
+
+func init() {
+ dbMain = &pgTester{}
+}
+
+// setup dumps the database schema and imports it into a temporary randomly
+// generated test database so that tests can be run against it using the
+// generated sqlboiler ORM package.
+func (p *pgTester) setup() error {
+ var err error
+
+ viper.SetDefault("psql.schema", "public")
+ viper.SetDefault("psql.port", 5432)
+ viper.SetDefault("psql.sslmode", "require")
+
+ p.dbName = viper.GetString("psql.dbname")
+ p.host = viper.GetString("psql.host")
+ p.user = viper.GetString("psql.user")
+ p.pass = viper.GetString("psql.pass")
+ p.port = viper.GetInt("psql.port")
+ p.sslmode = viper.GetString("psql.sslmode")
+ p.testDBName = viper.GetString("psql.testdbname")
+ p.skipSQLCmd = viper.GetBool("psql.skipsqlcmd")
+
+ err = vala.BeginValidation().Validate(
+ vala.StringNotEmpty(p.user, "psql.user"),
+ vala.StringNotEmpty(p.host, "psql.host"),
+ vala.Not(vala.Equals(p.port, 0, "psql.port")),
+ vala.StringNotEmpty(p.dbName, "psql.dbname"),
+ vala.StringNotEmpty(p.sslmode, "psql.sslmode"),
+ ).Check()
+
+ if err != nil {
+ return err
+ }
+
+ // if no testing DB passed
+ if len(p.testDBName) == 0 {
+ // Create a randomized db name.
+ p.testDBName = randomize.StableDBName(p.dbName)
+ }
+
+ if err = p.makePGPassFile(); err != nil {
+ return err
+ }
+
+ if !p.skipSQLCmd {
+ if err = p.dropTestDB(); err != nil {
+ return err
+ }
+ if err = p.createTestDB(); err != nil {
+ return err
+ }
+
+ dumpCmd := exec.Command("pg_dump", "--schema-only", p.dbName)
+ dumpCmd.Env = append(os.Environ(), p.pgEnv()...)
+ createCmd := exec.Command("psql", p.testDBName)
+ createCmd.Env = append(os.Environ(), p.pgEnv()...)
+
+ r, w := io.Pipe()
+ dumpCmdStderr := &bytes.Buffer{}
+ createCmdStderr := &bytes.Buffer{}
+
+ dumpCmd.Stdout = w
+ dumpCmd.Stderr = dumpCmdStderr
+
+ createCmd.Stdin = newFKeyDestroyer(rgxPGFkey, r)
+ createCmd.Stderr = createCmdStderr
+
+ if err = dumpCmd.Start(); err != nil {
+ return errors.Wrap(err, "failed to start pg_dump command")
+ }
+ if err = createCmd.Start(); err != nil {
+ return errors.Wrap(err, "failed to start psql command")
+ }
+
+ if err = dumpCmd.Wait(); err != nil {
+ fmt.Println(err)
+ fmt.Println(dumpCmdStderr.String())
+ return errors.Wrap(err, "failed to wait for pg_dump command")
+ }
+
+ _ = w.Close() // After dumpCmd is done, close the write end of the pipe
+
+ if err = createCmd.Wait(); err != nil {
+ fmt.Println(err)
+ fmt.Println(createCmdStderr.String())
+ return errors.Wrap(err, "failed to wait for psql command")
+ }
+ }
+
+ return nil
+}
+
+func (p *pgTester) runCmd(stdin, command string, args ...string) error {
+ cmd := exec.Command(command, args...)
+ cmd.Env = append(os.Environ(), p.pgEnv()...)
+
+ if len(stdin) != 0 {
+ cmd.Stdin = strings.NewReader(stdin)
+ }
+
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ if err := cmd.Run(); err != nil {
+ fmt.Println("failed running:", command, args)
+ fmt.Println(stdout.String())
+ fmt.Println(stderr.String())
+ return err
+ }
+
+ return nil
+}
+
+func (p *pgTester) pgEnv() []string {
+ return []string{
+ fmt.Sprintf("PGHOST=%s", p.host),
+ fmt.Sprintf("PGPORT=%d", p.port),
+ fmt.Sprintf("PGUSER=%s", p.user),
+ fmt.Sprintf("PGPASSFILE=%s", p.pgPassFile),
+ }
+}
+
+func (p *pgTester) makePGPassFile() error {
+ tmp, err := os.CreateTemp("", "pgpass")
+ if err != nil {
+ return errors.Wrap(err, "failed to create option file")
+ }
+
+ fmt.Fprintf(tmp, "%s:%d:postgres:%s", p.host, p.port, p.user)
+ if len(p.pass) != 0 {
+ fmt.Fprintf(tmp, ":%s", p.pass)
+ }
+ fmt.Fprintln(tmp)
+
+ fmt.Fprintf(tmp, "%s:%d:%s:%s", p.host, p.port, p.dbName, p.user)
+ if len(p.pass) != 0 {
+ fmt.Fprintf(tmp, ":%s", p.pass)
+ }
+ fmt.Fprintln(tmp)
+
+ fmt.Fprintf(tmp, "%s:%d:%s:%s", p.host, p.port, p.testDBName, p.user)
+ if len(p.pass) != 0 {
+ fmt.Fprintf(tmp, ":%s", p.pass)
+ }
+ fmt.Fprintln(tmp)
+
+ p.pgPassFile = tmp.Name()
+ return tmp.Close()
+}
+
+func (p *pgTester) createTestDB() error {
+ return p.runCmd("", "createdb", p.testDBName)
+}
+
+func (p *pgTester) dropTestDB() error {
+ return p.runCmd("", "dropdb", "--if-exists", p.testDBName)
+}
+
+// teardown executes cleanup tasks when the tests finish running
+func (p *pgTester) teardown() error {
+ var err error
+ if err = p.dbConn.Close(); err != nil {
+ return err
+ }
+ p.dbConn = nil
+
+ if !p.skipSQLCmd {
+ if err = p.dropTestDB(); err != nil {
+ return err
+ }
+ }
+
+ return os.Remove(p.pgPassFile)
+}
+
+func (p *pgTester) conn() (*sql.DB, error) {
+ if p.dbConn != nil {
+ return p.dbConn, nil
+ }
+
+ var err error
+ p.dbConn, err = sql.Open("postgres", driver.PSQLBuildQueryString(p.user, p.pass, p.testDBName, p.host, p.port, p.sslmode))
+ if err != nil {
+ return nil, err
+ }
+
+ return p.dbConn, nil
+}
diff --git a/src/models/psql_suites_test.go b/src/models/psql_suites_test.go
new file mode 100644
index 0000000..9b30f06
--- /dev/null
+++ b/src/models/psql_suites_test.go
@@ -0,0 +1,10 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+import "testing"
+
+func TestUpsert(t *testing.T) {
+ t.Run("Users", testUsersUpsert)
+}
diff --git a/src/models/psql_upsert.go b/src/models/psql_upsert.go
new file mode 100644
index 0000000..d71b7d6
--- /dev/null
+++ b/src/models/psql_upsert.go
@@ -0,0 +1,99 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/volatiletech/sqlboiler/v4/drivers"
+ "github.com/volatiletech/strmangle"
+)
+
+type UpsertOptions struct {
+ conflictTarget string
+ updateSet string
+}
+
+type UpsertOptionFunc func(o *UpsertOptions)
+
+func UpsertConflictTarget(conflictTarget string) UpsertOptionFunc {
+ return func(o *UpsertOptions) {
+ o.conflictTarget = conflictTarget
+ }
+}
+
+func UpsertUpdateSet(updateSet string) UpsertOptionFunc {
+ return func(o *UpsertOptions) {
+ o.updateSet = updateSet
+ }
+}
+
+// buildUpsertQueryPostgres builds a SQL statement string using the upsertData provided.
+func buildUpsertQueryPostgres(dia drivers.Dialect, tableName string, updateOnConflict bool, ret, update, conflict, whitelist []string, opts ...UpsertOptionFunc) string {
+ conflict = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, conflict)
+ whitelist = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, whitelist)
+ ret = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, ret)
+
+ upsertOpts := &UpsertOptions{}
+ for _, o := range opts {
+ o(upsertOpts)
+ }
+
+ buf := strmangle.GetBuffer()
+ defer strmangle.PutBuffer(buf)
+
+ columns := "DEFAULT VALUES"
+ if len(whitelist) != 0 {
+ columns = fmt.Sprintf("(%s) VALUES (%s)",
+ strings.Join(whitelist, ", "),
+ strmangle.Placeholders(dia.UseIndexPlaceholders, len(whitelist), 1, 1))
+ }
+
+ fmt.Fprintf(
+ buf,
+ "INSERT INTO %s %s ON CONFLICT ",
+ tableName,
+ columns,
+ )
+
+ if upsertOpts.conflictTarget != "" {
+ buf.WriteString(upsertOpts.conflictTarget)
+ } else if len(conflict) != 0 {
+ buf.WriteByte('(')
+ buf.WriteString(strings.Join(conflict, ", "))
+ buf.WriteByte(')')
+ }
+ buf.WriteByte(' ')
+
+ if !updateOnConflict || len(update) == 0 {
+ buf.WriteString("DO NOTHING")
+ } else {
+ buf.WriteString("DO UPDATE SET ")
+
+ if upsertOpts.updateSet != "" {
+ buf.WriteString(upsertOpts.updateSet)
+ } else {
+ for i, v := range update {
+ if len(v) == 0 {
+ continue
+ }
+ if i != 0 {
+ buf.WriteByte(',')
+ }
+ quoted := strmangle.IdentQuote(dia.LQ, dia.RQ, v)
+ buf.WriteString(quoted)
+ buf.WriteString(" = EXCLUDED.")
+ buf.WriteString(quoted)
+ }
+ }
+ }
+
+ if len(ret) != 0 {
+ buf.WriteString(" RETURNING ")
+ buf.WriteString(strings.Join(ret, ", "))
+ }
+
+ return buf.String()
+}
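
To make the builder above concrete: for the users table later in this commit, User.Upsert calls buildUpsertQueryPostgres with the package-level dialect and a pre-quoted table name. A minimal in-package sketch with illustrative column lists; the printed statement is approximate (exact placeholder spacing comes from strmangle):

package models

import "fmt"

// demoUpsertQuery is a hypothetical helper (not part of the generated code)
// showing the statement shape buildUpsertQueryPostgres produces.
func demoUpsertQuery() {
	q := buildUpsertQueryPostgres(
		dialect,   // package-level Postgres dialect: double-quoted identifiers, $N placeholders
		`"users"`, // table name is passed pre-quoted, as User.Upsert does
		true,      // DO UPDATE on conflict rather than DO NOTHING
		[]string{"id", "created_at", "updated_at"},            // RETURNING columns
		[]string{"username", "secret", "email", "updated_at"}, // update set
		[]string{"id"},                                        // conflict target
		[]string{"username", "secret", "email"},               // insert column whitelist
	)
	fmt.Println(q)
	// Roughly:
	//   INSERT INTO "users" ("username", "secret", "email") VALUES ($1,$2,$3)
	//   ON CONFLICT ("id") DO UPDATE SET "username" = EXCLUDED."username",
	//   "secret" = EXCLUDED."secret","email" = EXCLUDED."email"
	//   RETURNING "id", "created_at", "updated_at"
}
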
diff --git a/src/models/users.go b/src/models/users.go
new file mode 100644
index 0000000..666247a
--- /dev/null
+++ b/src/models/users.go
@@ -0,0 +1,1051 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/friendsofgo/errors"
+ "github.com/volatiletech/sqlboiler/v4/boil"
+ "github.com/volatiletech/sqlboiler/v4/queries"
+ "github.com/volatiletech/sqlboiler/v4/queries/qm"
+ "github.com/volatiletech/sqlboiler/v4/queries/qmhelper"
+ "github.com/volatiletech/strmangle"
+)
+
+// User is an object representing the database table.
+type User struct {
+ ID int `boil:"id" json:"id" toml:"id" yaml:"id"`
+ Username string `boil:"username" json:"username" toml:"username" yaml:"username"`
+ Secret string `boil:"secret" json:"secret" toml:"secret" yaml:"secret"`
+ Email string `boil:"email" json:"email" toml:"email" yaml:"email"`
+ CreatedAt time.Time `boil:"created_at" json:"created_at" toml:"created_at" yaml:"created_at"`
+ UpdatedAt time.Time `boil:"updated_at" json:"updated_at" toml:"updated_at" yaml:"updated_at"`
+
+ R *userR `boil:"-" json:"-" toml:"-" yaml:"-"`
+ L userL `boil:"-" json:"-" toml:"-" yaml:"-"`
+}
+
+var UserColumns = struct {
+ ID string
+ Username string
+ Secret string
+ Email string
+ CreatedAt string
+ UpdatedAt string
+}{
+ ID: "id",
+ Username: "username",
+ Secret: "secret",
+ Email: "email",
+ CreatedAt: "created_at",
+ UpdatedAt: "updated_at",
+}
+
+var UserTableColumns = struct {
+ ID string
+ Username string
+ Secret string
+ Email string
+ CreatedAt string
+ UpdatedAt string
+}{
+ ID: "users.id",
+ Username: "users.username",
+ Secret: "users.secret",
+ Email: "users.email",
+ CreatedAt: "users.created_at",
+ UpdatedAt: "users.updated_at",
+}
+
+// Generated where
+
+type whereHelperint struct{ field string }
+
+func (w whereHelperint) EQ(x int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
+func (w whereHelperint) NEQ(x int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
+func (w whereHelperint) LT(x int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
+func (w whereHelperint) LTE(x int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
+func (w whereHelperint) GT(x int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
+func (w whereHelperint) GTE(x int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
+func (w whereHelperint) IN(slice []int) qm.QueryMod {
+ values := make([]interface{}, 0, len(slice))
+ for _, value := range slice {
+ values = append(values, value)
+ }
+ return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
+}
+func (w whereHelperint) NIN(slice []int) qm.QueryMod {
+ values := make([]interface{}, 0, len(slice))
+ for _, value := range slice {
+ values = append(values, value)
+ }
+ return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...)
+}
+
+type whereHelperstring struct{ field string }
+
+func (w whereHelperstring) EQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
+func (w whereHelperstring) NEQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
+func (w whereHelperstring) LT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
+func (w whereHelperstring) LTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
+func (w whereHelperstring) GT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
+func (w whereHelperstring) GTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
+func (w whereHelperstring) LIKE(x string) qm.QueryMod { return qm.Where(w.field+" LIKE ?", x) }
+func (w whereHelperstring) NLIKE(x string) qm.QueryMod { return qm.Where(w.field+" NOT LIKE ?", x) }
+func (w whereHelperstring) ILIKE(x string) qm.QueryMod { return qm.Where(w.field+" ILIKE ?", x) }
+func (w whereHelperstring) NILIKE(x string) qm.QueryMod { return qm.Where(w.field+" NOT ILIKE ?", x) }
+func (w whereHelperstring) IN(slice []string) qm.QueryMod {
+ values := make([]interface{}, 0, len(slice))
+ for _, value := range slice {
+ values = append(values, value)
+ }
+ return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
+}
+func (w whereHelperstring) NIN(slice []string) qm.QueryMod {
+ values := make([]interface{}, 0, len(slice))
+ for _, value := range slice {
+ values = append(values, value)
+ }
+ return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...)
+}
+
+type whereHelpertime_Time struct{ field string }
+
+func (w whereHelpertime_Time) EQ(x time.Time) qm.QueryMod {
+ return qmhelper.Where(w.field, qmhelper.EQ, x)
+}
+func (w whereHelpertime_Time) NEQ(x time.Time) qm.QueryMod {
+ return qmhelper.Where(w.field, qmhelper.NEQ, x)
+}
+func (w whereHelpertime_Time) LT(x time.Time) qm.QueryMod {
+ return qmhelper.Where(w.field, qmhelper.LT, x)
+}
+func (w whereHelpertime_Time) LTE(x time.Time) qm.QueryMod {
+ return qmhelper.Where(w.field, qmhelper.LTE, x)
+}
+func (w whereHelpertime_Time) GT(x time.Time) qm.QueryMod {
+ return qmhelper.Where(w.field, qmhelper.GT, x)
+}
+func (w whereHelpertime_Time) GTE(x time.Time) qm.QueryMod {
+ return qmhelper.Where(w.field, qmhelper.GTE, x)
+}
+
+var UserWhere = struct {
+ ID whereHelperint
+ Username whereHelperstring
+ Secret whereHelperstring
+ Email whereHelperstring
+ CreatedAt whereHelpertime_Time
+ UpdatedAt whereHelpertime_Time
+}{
+ ID: whereHelperint{field: "\"users\".\"id\""},
+ Username: whereHelperstring{field: "\"users\".\"username\""},
+ Secret: whereHelperstring{field: "\"users\".\"secret\""},
+ Email: whereHelperstring{field: "\"users\".\"email\""},
+ CreatedAt: whereHelpertime_Time{field: "\"users\".\"created_at\""},
+ UpdatedAt: whereHelpertime_Time{field: "\"users\".\"updated_at\""},
+}
+
+// UserRels is where relationship names are stored.
+var UserRels = struct {
+}{}
+
+// userR is where relationships are stored.
+type userR struct {
+}
+
+// NewStruct creates a new relationship struct
+func (*userR) NewStruct() *userR {
+ return &userR{}
+}
+
+// userL is where Load methods for each relationship are stored.
+type userL struct{}
+
+var (
+ userAllColumns = []string{"id", "username", "secret", "email", "created_at", "updated_at"}
+ userColumnsWithoutDefault = []string{"username", "secret", "email"}
+ userColumnsWithDefault = []string{"id", "created_at", "updated_at"}
+ userPrimaryKeyColumns = []string{"id"}
+ userGeneratedColumns = []string{}
+)
+
+type (
+ // UserSlice is an alias for a slice of pointers to User.
+ // This should almost always be used instead of []User.
+ UserSlice []*User
+ // UserHook is the signature for custom User hook methods
+ UserHook func(context.Context, boil.ContextExecutor, *User) error
+
+ userQuery struct {
+ *queries.Query
+ }
+)
+
+// Cache for insert, update and upsert
+var (
+ userType = reflect.TypeOf(&User{})
+ userMapping = queries.MakeStructMapping(userType)
+ userPrimaryKeyMapping, _ = queries.BindMapping(userType, userMapping, userPrimaryKeyColumns)
+ userInsertCacheMut sync.RWMutex
+ userInsertCache = make(map[string]insertCache)
+ userUpdateCacheMut sync.RWMutex
+ userUpdateCache = make(map[string]updateCache)
+ userUpsertCacheMut sync.RWMutex
+ userUpsertCache = make(map[string]insertCache)
+)
+
+var (
+ // Force time package dependency for automated UpdatedAt/CreatedAt.
+ _ = time.Second
+ // Force qmhelper dependency for where clause generation (which doesn't
+ // always happen)
+ _ = qmhelper.Where
+)
+
+var userAfterSelectMu sync.Mutex
+var userAfterSelectHooks []UserHook
+
+var userBeforeInsertMu sync.Mutex
+var userBeforeInsertHooks []UserHook
+var userAfterInsertMu sync.Mutex
+var userAfterInsertHooks []UserHook
+
+var userBeforeUpdateMu sync.Mutex
+var userBeforeUpdateHooks []UserHook
+var userAfterUpdateMu sync.Mutex
+var userAfterUpdateHooks []UserHook
+
+var userBeforeDeleteMu sync.Mutex
+var userBeforeDeleteHooks []UserHook
+var userAfterDeleteMu sync.Mutex
+var userAfterDeleteHooks []UserHook
+
+var userBeforeUpsertMu sync.Mutex
+var userBeforeUpsertHooks []UserHook
+var userAfterUpsertMu sync.Mutex
+var userAfterUpsertHooks []UserHook
+
+// doAfterSelectHooks executes all "after Select" hooks.
+func (o *User) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range userAfterSelectHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doBeforeInsertHooks executes all "before insert" hooks.
+func (o *User) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range userBeforeInsertHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doAfterInsertHooks executes all "after Insert" hooks.
+func (o *User) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range userAfterInsertHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doBeforeUpdateHooks executes all "before Update" hooks.
+func (o *User) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range userBeforeUpdateHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doAfterUpdateHooks executes all "after Update" hooks.
+func (o *User) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range userAfterUpdateHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doBeforeDeleteHooks executes all "before Delete" hooks.
+func (o *User) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range userBeforeDeleteHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doAfterDeleteHooks executes all "after Delete" hooks.
+func (o *User) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range userAfterDeleteHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doBeforeUpsertHooks executes all "before Upsert" hooks.
+func (o *User) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range userBeforeUpsertHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doAfterUpsertHooks executes all "after Upsert" hooks.
+func (o *User) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range userAfterUpsertHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// AddUserHook registers your hook function for all future operations.
+func AddUserHook(hookPoint boil.HookPoint, userHook UserHook) {
+ switch hookPoint {
+ case boil.AfterSelectHook:
+ userAfterSelectMu.Lock()
+ userAfterSelectHooks = append(userAfterSelectHooks, userHook)
+ userAfterSelectMu.Unlock()
+ case boil.BeforeInsertHook:
+ userBeforeInsertMu.Lock()
+ userBeforeInsertHooks = append(userBeforeInsertHooks, userHook)
+ userBeforeInsertMu.Unlock()
+ case boil.AfterInsertHook:
+ userAfterInsertMu.Lock()
+ userAfterInsertHooks = append(userAfterInsertHooks, userHook)
+ userAfterInsertMu.Unlock()
+ case boil.BeforeUpdateHook:
+ userBeforeUpdateMu.Lock()
+ userBeforeUpdateHooks = append(userBeforeUpdateHooks, userHook)
+ userBeforeUpdateMu.Unlock()
+ case boil.AfterUpdateHook:
+ userAfterUpdateMu.Lock()
+ userAfterUpdateHooks = append(userAfterUpdateHooks, userHook)
+ userAfterUpdateMu.Unlock()
+ case boil.BeforeDeleteHook:
+ userBeforeDeleteMu.Lock()
+ userBeforeDeleteHooks = append(userBeforeDeleteHooks, userHook)
+ userBeforeDeleteMu.Unlock()
+ case boil.AfterDeleteHook:
+ userAfterDeleteMu.Lock()
+ userAfterDeleteHooks = append(userAfterDeleteHooks, userHook)
+ userAfterDeleteMu.Unlock()
+ case boil.BeforeUpsertHook:
+ userBeforeUpsertMu.Lock()
+ userBeforeUpsertHooks = append(userBeforeUpsertHooks, userHook)
+ userBeforeUpsertMu.Unlock()
+ case boil.AfterUpsertHook:
+ userAfterUpsertMu.Lock()
+ userAfterUpsertHooks = append(userAfterUpsertHooks, userHook)
+ userAfterUpsertMu.Unlock()
+ }
+}
+
+// One returns a single user record from the query.
+func (q userQuery) One(ctx context.Context, exec boil.ContextExecutor) (*User, error) {
+ o := &User{}
+
+ queries.SetLimit(q.Query, 1)
+
+ err := q.Bind(ctx, exec, o)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, sql.ErrNoRows
+ }
+ return nil, errors.Wrap(err, "models: failed to execute a one query for users")
+ }
+
+ if err := o.doAfterSelectHooks(ctx, exec); err != nil {
+ return o, err
+ }
+
+ return o, nil
+}
+
+// All returns all User records from the query.
+func (q userQuery) All(ctx context.Context, exec boil.ContextExecutor) (UserSlice, error) {
+ var o []*User
+
+ err := q.Bind(ctx, exec, &o)
+ if err != nil {
+ return nil, errors.Wrap(err, "models: failed to assign all query results to User slice")
+ }
+
+ if len(userAfterSelectHooks) != 0 {
+ for _, obj := range o {
+ if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
+ return o, err
+ }
+ }
+ }
+
+ return o, nil
+}
+
+// Count returns the count of all User records in the query.
+func (q userQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ var count int64
+
+ queries.SetSelect(q.Query, nil)
+ queries.SetCount(q.Query)
+
+ err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
+ if err != nil {
+ return 0, errors.Wrap(err, "models: failed to count users rows")
+ }
+
+ return count, nil
+}
+
+// Exists checks if the row exists in the table.
+func (q userQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
+ var count int64
+
+ queries.SetSelect(q.Query, nil)
+ queries.SetCount(q.Query)
+ queries.SetLimit(q.Query, 1)
+
+ err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
+ if err != nil {
+ return false, errors.Wrap(err, "models: failed to check if users exists")
+ }
+
+ return count > 0, nil
+}
+
+// Users retrieves all the records using an executor.
+func Users(mods ...qm.QueryMod) userQuery {
+ mods = append(mods, qm.From("\"users\""))
+ q := NewQuery(mods...)
+ if len(queries.GetSelect(q)) == 0 {
+ queries.SetSelect(q, []string{"\"users\".*"})
+ }
+
+ return userQuery{q}
+}
+
+// FindUser retrieves a single record by ID with an executor.
+// If selectCols is empty Find will return all columns.
+func FindUser(ctx context.Context, exec boil.ContextExecutor, iD int, selectCols ...string) (*User, error) {
+ userObj := &User{}
+
+ sel := "*"
+ if len(selectCols) > 0 {
+ sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
+ }
+ query := fmt.Sprintf(
+ "select %s from \"users\" where \"id\"=$1", sel,
+ )
+
+ q := queries.Raw(query, iD)
+
+ err := q.Bind(ctx, exec, userObj)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, sql.ErrNoRows
+ }
+ return nil, errors.Wrap(err, "models: unable to select from users")
+ }
+
+ if err = userObj.doAfterSelectHooks(ctx, exec); err != nil {
+ return userObj, err
+ }
+
+ return userObj, nil
+}
+
+// Insert a single record using an executor.
+// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
+func (o *User) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
+ if o == nil {
+ return errors.New("models: no users provided for insertion")
+ }
+
+ var err error
+ if !boil.TimestampsAreSkipped(ctx) {
+ currTime := time.Now().In(boil.GetLocation())
+
+ if o.CreatedAt.IsZero() {
+ o.CreatedAt = currTime
+ }
+ if o.UpdatedAt.IsZero() {
+ o.UpdatedAt = currTime
+ }
+ }
+
+ if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
+ return err
+ }
+
+ nzDefaults := queries.NonZeroDefaultSet(userColumnsWithDefault, o)
+
+ key := makeCacheKey(columns, nzDefaults)
+ userInsertCacheMut.RLock()
+ cache, cached := userInsertCache[key]
+ userInsertCacheMut.RUnlock()
+
+ if !cached {
+ wl, returnColumns := columns.InsertColumnSet(
+ userAllColumns,
+ userColumnsWithDefault,
+ userColumnsWithoutDefault,
+ nzDefaults,
+ )
+
+ cache.valueMapping, err = queries.BindMapping(userType, userMapping, wl)
+ if err != nil {
+ return err
+ }
+ cache.retMapping, err = queries.BindMapping(userType, userMapping, returnColumns)
+ if err != nil {
+ return err
+ }
+ if len(wl) != 0 {
+ cache.query = fmt.Sprintf("INSERT INTO \"users\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
+ } else {
+ cache.query = "INSERT INTO \"users\" %sDEFAULT VALUES%s"
+ }
+
+ var queryOutput, queryReturning string
+
+ if len(cache.retMapping) != 0 {
+ queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\""))
+ }
+
+ cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
+ }
+
+ value := reflect.Indirect(reflect.ValueOf(o))
+ vals := queries.ValuesFromMapping(value, cache.valueMapping)
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, cache.query)
+ fmt.Fprintln(writer, vals)
+ }
+
+ if len(cache.retMapping) != 0 {
+ err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
+ } else {
+ _, err = exec.ExecContext(ctx, cache.query, vals...)
+ }
+
+ if err != nil {
+ return errors.Wrap(err, "models: unable to insert into users")
+ }
+
+ if !cached {
+ userInsertCacheMut.Lock()
+ userInsertCache[key] = cache
+ userInsertCacheMut.Unlock()
+ }
+
+ return o.doAfterInsertHooks(ctx, exec)
+}
+
+// Update uses an executor to update the User.
+// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
+// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
+func (o *User) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
+ if !boil.TimestampsAreSkipped(ctx) {
+ currTime := time.Now().In(boil.GetLocation())
+
+ o.UpdatedAt = currTime
+ }
+
+ var err error
+ if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
+ return 0, err
+ }
+ key := makeCacheKey(columns, nil)
+ userUpdateCacheMut.RLock()
+ cache, cached := userUpdateCache[key]
+ userUpdateCacheMut.RUnlock()
+
+ if !cached {
+ wl := columns.UpdateColumnSet(
+ userAllColumns,
+ userPrimaryKeyColumns,
+ )
+
+ if !columns.IsWhitelist() {
+ wl = strmangle.SetComplement(wl, []string{"created_at"})
+ }
+ if len(wl) == 0 {
+ return 0, errors.New("models: unable to update users, could not build whitelist")
+ }
+
+ cache.query = fmt.Sprintf("UPDATE \"users\" SET %s WHERE %s",
+ strmangle.SetParamNames("\"", "\"", 1, wl),
+ strmangle.WhereClause("\"", "\"", len(wl)+1, userPrimaryKeyColumns),
+ )
+ cache.valueMapping, err = queries.BindMapping(userType, userMapping, append(wl, userPrimaryKeyColumns...))
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, cache.query)
+ fmt.Fprintln(writer, values)
+ }
+ var result sql.Result
+ result, err = exec.ExecContext(ctx, cache.query, values...)
+ if err != nil {
+ return 0, errors.Wrap(err, "models: unable to update users row")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "models: failed to get rows affected by update for users")
+ }
+
+ if !cached {
+ userUpdateCacheMut.Lock()
+ userUpdateCache[key] = cache
+ userUpdateCacheMut.Unlock()
+ }
+
+ return rowsAff, o.doAfterUpdateHooks(ctx, exec)
+}
+
+// UpdateAll updates all rows with the specified column values.
+func (q userQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
+ queries.SetUpdate(q.Query, cols)
+
+ result, err := q.Query.ExecContext(ctx, exec)
+ if err != nil {
+ return 0, errors.Wrap(err, "models: unable to update all for users")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "models: unable to retrieve rows affected for users")
+ }
+
+ return rowsAff, nil
+}
+
+// UpdateAll updates all rows with the specified column values, using an executor.
+func (o UserSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
+ ln := int64(len(o))
+ if ln == 0 {
+ return 0, nil
+ }
+
+ if len(cols) == 0 {
+ return 0, errors.New("models: update all requires at least one column argument")
+ }
+
+ colNames := make([]string, len(cols))
+ args := make([]interface{}, len(cols))
+
+ i := 0
+ for name, value := range cols {
+ colNames[i] = name
+ args[i] = value
+ i++
+ }
+
+ // Append all of the primary key values for each column
+ for _, obj := range o {
+ pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), userPrimaryKeyMapping)
+ args = append(args, pkeyArgs...)
+ }
+
+ sql := fmt.Sprintf("UPDATE \"users\" SET %s WHERE %s",
+ strmangle.SetParamNames("\"", "\"", 1, colNames),
+ strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, userPrimaryKeyColumns, len(o)))
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, sql)
+ fmt.Fprintln(writer, args...)
+ }
+ result, err := exec.ExecContext(ctx, sql, args...)
+ if err != nil {
+ return 0, errors.Wrap(err, "models: unable to update all in user slice")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all user")
+ }
+ return rowsAff, nil
+}
+
+// Upsert attempts an insert using an executor, and does an update or ignore on conflict.
+// See boil.Columns documentation for how to properly use updateColumns and insertColumns.
+func (o *User) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns, opts ...UpsertOptionFunc) error {
+ if o == nil {
+ return errors.New("models: no users provided for upsert")
+ }
+ if !boil.TimestampsAreSkipped(ctx) {
+ currTime := time.Now().In(boil.GetLocation())
+
+ if o.CreatedAt.IsZero() {
+ o.CreatedAt = currTime
+ }
+ o.UpdatedAt = currTime
+ }
+
+ if err := o.doBeforeUpsertHooks(ctx, exec); err != nil {
+ return err
+ }
+
+ nzDefaults := queries.NonZeroDefaultSet(userColumnsWithDefault, o)
+
+ // Build cache key in-line uglily - mysql vs psql problems
+ buf := strmangle.GetBuffer()
+ if updateOnConflict {
+ buf.WriteByte('t')
+ } else {
+ buf.WriteByte('f')
+ }
+ buf.WriteByte('.')
+ for _, c := range conflictColumns {
+ buf.WriteString(c)
+ }
+ buf.WriteByte('.')
+ buf.WriteString(strconv.Itoa(updateColumns.Kind))
+ for _, c := range updateColumns.Cols {
+ buf.WriteString(c)
+ }
+ buf.WriteByte('.')
+ buf.WriteString(strconv.Itoa(insertColumns.Kind))
+ for _, c := range insertColumns.Cols {
+ buf.WriteString(c)
+ }
+ buf.WriteByte('.')
+ for _, c := range nzDefaults {
+ buf.WriteString(c)
+ }
+ key := buf.String()
+ strmangle.PutBuffer(buf)
+
+ userUpsertCacheMut.RLock()
+ cache, cached := userUpsertCache[key]
+ userUpsertCacheMut.RUnlock()
+
+ var err error
+
+ if !cached {
+ insert, _ := insertColumns.InsertColumnSet(
+ userAllColumns,
+ userColumnsWithDefault,
+ userColumnsWithoutDefault,
+ nzDefaults,
+ )
+
+ update := updateColumns.UpdateColumnSet(
+ userAllColumns,
+ userPrimaryKeyColumns,
+ )
+
+ if updateOnConflict && len(update) == 0 {
+ return errors.New("models: unable to upsert users, could not build update column list")
+ }
+
+ ret := strmangle.SetComplement(userAllColumns, strmangle.SetIntersect(insert, update))
+
+ conflict := conflictColumns
+ if len(conflict) == 0 && updateOnConflict && len(update) != 0 {
+ if len(userPrimaryKeyColumns) == 0 {
+ return errors.New("models: unable to upsert users, could not build conflict column list")
+ }
+
+ conflict = make([]string, len(userPrimaryKeyColumns))
+ copy(conflict, userPrimaryKeyColumns)
+ }
+ cache.query = buildUpsertQueryPostgres(dialect, "\"users\"", updateOnConflict, ret, update, conflict, insert, opts...)
+
+ cache.valueMapping, err = queries.BindMapping(userType, userMapping, insert)
+ if err != nil {
+ return err
+ }
+ if len(ret) != 0 {
+ cache.retMapping, err = queries.BindMapping(userType, userMapping, ret)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ value := reflect.Indirect(reflect.ValueOf(o))
+ vals := queries.ValuesFromMapping(value, cache.valueMapping)
+ var returns []interface{}
+ if len(cache.retMapping) != 0 {
+ returns = queries.PtrsFromMapping(value, cache.retMapping)
+ }
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, cache.query)
+ fmt.Fprintln(writer, vals)
+ }
+ if len(cache.retMapping) != 0 {
+ err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...)
+ if errors.Is(err, sql.ErrNoRows) {
+ err = nil // Postgres doesn't return anything when there's no update
+ }
+ } else {
+ _, err = exec.ExecContext(ctx, cache.query, vals...)
+ }
+ if err != nil {
+ return errors.Wrap(err, "models: unable to upsert users")
+ }
+
+ if !cached {
+ userUpsertCacheMut.Lock()
+ userUpsertCache[key] = cache
+ userUpsertCacheMut.Unlock()
+ }
+
+ return o.doAfterUpsertHooks(ctx, exec)
+}
+
+// Delete deletes a single User record with an executor.
+// Delete will match against the primary key column to find the record to delete.
+func (o *User) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ if o == nil {
+ return 0, errors.New("models: no User provided for delete")
+ }
+
+ if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
+ return 0, err
+ }
+
+ args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), userPrimaryKeyMapping)
+ sql := "DELETE FROM \"users\" WHERE \"id\"=$1"
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, sql)
+ fmt.Fprintln(writer, args...)
+ }
+ result, err := exec.ExecContext(ctx, sql, args...)
+ if err != nil {
+ return 0, errors.Wrap(err, "models: unable to delete from users")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "models: failed to get rows affected by delete for users")
+ }
+
+ if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
+ return 0, err
+ }
+
+ return rowsAff, nil
+}
+
+// DeleteAll deletes all matching rows.
+func (q userQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ if q.Query == nil {
+ return 0, errors.New("models: no userQuery provided for delete all")
+ }
+
+ queries.SetDelete(q.Query)
+
+ result, err := q.Query.ExecContext(ctx, exec)
+ if err != nil {
+ return 0, errors.Wrap(err, "models: unable to delete all from users")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for users")
+ }
+
+ return rowsAff, nil
+}
+
+// DeleteAll deletes all rows in the slice, using an executor.
+func (o UserSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ if len(o) == 0 {
+ return 0, nil
+ }
+
+ if len(userBeforeDeleteHooks) != 0 {
+ for _, obj := range o {
+ if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ var args []interface{}
+ for _, obj := range o {
+ pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), userPrimaryKeyMapping)
+ args = append(args, pkeyArgs...)
+ }
+
+ sql := "DELETE FROM \"users\" WHERE " +
+ strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, userPrimaryKeyColumns, len(o))
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, sql)
+ fmt.Fprintln(writer, args)
+ }
+ result, err := exec.ExecContext(ctx, sql, args...)
+ if err != nil {
+ return 0, errors.Wrap(err, "models: unable to delete all from user slice")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for users")
+ }
+
+ if len(userAfterDeleteHooks) != 0 {
+ for _, obj := range o {
+ if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ return rowsAff, nil
+}
+
+// Reload refetches the object from the database
+// using the primary keys with an executor.
+func (o *User) Reload(ctx context.Context, exec boil.ContextExecutor) error {
+ ret, err := FindUser(ctx, exec, o.ID)
+ if err != nil {
+ return err
+ }
+
+ *o = *ret
+ return nil
+}
+
+// ReloadAll refetches every row with matching primary key column values
+// and overwrites the original object slice with the newly updated slice.
+func (o *UserSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
+ if o == nil || len(*o) == 0 {
+ return nil
+ }
+
+ slice := UserSlice{}
+ var args []interface{}
+ for _, obj := range *o {
+ pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), userPrimaryKeyMapping)
+ args = append(args, pkeyArgs...)
+ }
+
+ sql := "SELECT \"users\".* FROM \"users\" WHERE " +
+ strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, userPrimaryKeyColumns, len(*o))
+
+ q := queries.Raw(sql, args...)
+
+ err := q.Bind(ctx, exec, &slice)
+ if err != nil {
+ return errors.Wrap(err, "models: unable to reload all in UserSlice")
+ }
+
+ *o = slice
+
+ return nil
+}
+
+// UserExists checks if the User row exists.
+func UserExists(ctx context.Context, exec boil.ContextExecutor, iD int) (bool, error) {
+ var exists bool
+ sql := "select exists(select 1 from \"users\" where \"id\"=$1 limit 1)"
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, sql)
+ fmt.Fprintln(writer, iD)
+ }
+ row := exec.QueryRowContext(ctx, sql, iD)
+
+ err := row.Scan(&exists)
+ if err != nil {
+ return false, errors.Wrap(err, "models: unable to check if users exists")
+ }
+
+ return exists, nil
+}
+
+// Exists checks if the User row exists.
+func (o *User) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
+ return UserExists(ctx, exec, o.ID)
+}
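
For orientation, a minimal sketch of how the generated users API above is typically consumed. This is not part of the commit: the module import path is hypothetical, and the connection string simply reuses the sqlboiler.toml settings:

package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/lib/pq"
	"github.com/volatiletech/sqlboiler/v4/boil"

	"newsstand/src/models" // hypothetical import path for the generated package
)

func main() {
	db, err := sql.Open("postgres",
		"host=localhost port=9002 user=newsstand password=newsstand dbname=newsstand sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	ctx := context.Background()

	// Insert: boil.Infer() lets the generated code choose the column list,
	// and created_at/updated_at are filled in automatically.
	u := &models.User{Username: "alice", Secret: "not-a-real-secret", Email: "alice@example.com"}
	if err := u.Insert(ctx, db, boil.Infer()); err != nil {
		log.Fatal(err)
	}

	// Query back using the generated where helpers.
	found, err := models.Users(models.UserWhere.Email.EQ("alice@example.com")).One(ctx, db)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("found user", found.ID, found.Username)

	// Delete by primary key.
	if _, err := found.Delete(ctx, db); err != nil {
		log.Fatal(err)
	}
}
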
diff --git a/src/models/users_test.go b/src/models/users_test.go
new file mode 100644
index 0000000..878ae32
--- /dev/null
+++ b/src/models/users_test.go
@@ -0,0 +1,732 @@
+// Code generated by SQLBoiler 4.16.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package models
+
+import (
+ "bytes"
+ "context"
+ "reflect"
+ "testing"
+
+ "github.com/volatiletech/randomize"
+ "github.com/volatiletech/sqlboiler/v4/boil"
+ "github.com/volatiletech/sqlboiler/v4/queries"
+ "github.com/volatiletech/strmangle"
+)
+
+var (
+ // Relationships sometimes use the reflection helper queries.Equal/queries.Assign
+ // so force a package dependency in case they don't.
+ _ = queries.Equal
+)
+
+func testUsers(t *testing.T) {
+ t.Parallel()
+
+ query := Users()
+
+ if query.Query == nil {
+ t.Error("expected a query, got nothing")
+ }
+}
+
+func testUsersDelete(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ if rowsAff, err := o.Delete(ctx, tx); err != nil {
+ t.Error(err)
+ } else if rowsAff != 1 {
+ t.Error("should only have deleted one row, but affected:", rowsAff)
+ }
+
+ count, err := Users().Count(ctx, tx)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if count != 0 {
+ t.Error("want zero records, got:", count)
+ }
+}
+
+func testUsersQueryDeleteAll(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ if rowsAff, err := Users().DeleteAll(ctx, tx); err != nil {
+ t.Error(err)
+ } else if rowsAff != 1 {
+ t.Error("should only have deleted one row, but affected:", rowsAff)
+ }
+
+ count, err := Users().Count(ctx, tx)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if count != 0 {
+ t.Error("want zero records, got:", count)
+ }
+}
+
+func testUsersSliceDeleteAll(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ slice := UserSlice{o}
+
+ if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
+ t.Error(err)
+ } else if rowsAff != 1 {
+ t.Error("should only have deleted one row, but affected:", rowsAff)
+ }
+
+ count, err := Users().Count(ctx, tx)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if count != 0 {
+ t.Error("want zero records, got:", count)
+ }
+}
+
+func testUsersExists(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ e, err := UserExists(ctx, tx, o.ID)
+ if err != nil {
+ t.Errorf("Unable to check if User exists: %s", err)
+ }
+ if !e {
+ t.Errorf("Expected UserExists to return true, but got false.")
+ }
+}
+
+func testUsersFind(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ userFound, err := FindUser(ctx, tx, o.ID)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if userFound == nil {
+ t.Error("want a record, got nil")
+ }
+}
+
+func testUsersBind(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ if err = Users().Bind(ctx, tx, o); err != nil {
+ t.Error(err)
+ }
+}
+
+func testUsersOne(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ if x, err := Users().One(ctx, tx); err != nil {
+ t.Error(err)
+ } else if x == nil {
+ t.Error("expected to get a non nil record")
+ }
+}
+
+func testUsersAll(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ userOne := &User{}
+ userTwo := &User{}
+ if err = randomize.Struct(seed, userOne, userDBTypes, false, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+ if err = randomize.Struct(seed, userTwo, userDBTypes, false, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = userOne.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+ if err = userTwo.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ slice, err := Users().All(ctx, tx)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(slice) != 2 {
+ t.Error("want 2 records, got:", len(slice))
+ }
+}
+
+func testUsersCount(t *testing.T) {
+ t.Parallel()
+
+ var err error
+ seed := randomize.NewSeed()
+ userOne := &User{}
+ userTwo := &User{}
+ if err = randomize.Struct(seed, userOne, userDBTypes, false, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+ if err = randomize.Struct(seed, userTwo, userDBTypes, false, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = userOne.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+ if err = userTwo.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ count, err := Users().Count(ctx, tx)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if count != 2 {
+ t.Error("want 2 records, got:", count)
+ }
+}
+
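+// Note: the hook functions below are test doubles. Each one simply zeroes the
+// User it receives, so testUsersHooks can compare the object against an empty
+// struct with reflect.DeepEqual to verify that the corresponding hook ran.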
+func userBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
+ *o = User{}
+ return nil
+}
+
+func userAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
+ *o = User{}
+ return nil
+}
+
+func userAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
+ *o = User{}
+ return nil
+}
+
+func userBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
+ *o = User{}
+ return nil
+}
+
+func userAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
+ *o = User{}
+ return nil
+}
+
+func userBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
+ *o = User{}
+ return nil
+}
+
+func userAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
+ *o = User{}
+ return nil
+}
+
+func userBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
+ *o = User{}
+ return nil
+}
+
+func userAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
+ *o = User{}
+ return nil
+}
+
+func testUsersHooks(t *testing.T) {
+ t.Parallel()
+
+ var err error
+
+ ctx := context.Background()
+ empty := &User{}
+ o := &User{}
+
+ seed := randomize.NewSeed()
+ if err = randomize.Struct(seed, o, userDBTypes, false); err != nil {
+ t.Errorf("Unable to randomize User object: %s", err)
+ }
+
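+	// Pattern repeated for each hook type: register the test hook, fire it
+	// directly through the generated doXxxHooks helper, confirm it zeroed o,
+	// then reset the package-level hook slice so later checks start clean.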
+ AddUserHook(boil.BeforeInsertHook, userBeforeInsertHook)
+ if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
+ t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
+ }
+ if !reflect.DeepEqual(o, empty) {
+ t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
+ }
+ userBeforeInsertHooks = []UserHook{}
+
+ AddUserHook(boil.AfterInsertHook, userAfterInsertHook)
+ if err = o.doAfterInsertHooks(ctx, nil); err != nil {
+ t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
+ }
+ if !reflect.DeepEqual(o, empty) {
+ t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
+ }
+ userAfterInsertHooks = []UserHook{}
+
+ AddUserHook(boil.AfterSelectHook, userAfterSelectHook)
+ if err = o.doAfterSelectHooks(ctx, nil); err != nil {
+ t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
+ }
+ if !reflect.DeepEqual(o, empty) {
+ t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
+ }
+ userAfterSelectHooks = []UserHook{}
+
+ AddUserHook(boil.BeforeUpdateHook, userBeforeUpdateHook)
+ if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
+ t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
+ }
+ if !reflect.DeepEqual(o, empty) {
+ t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
+ }
+ userBeforeUpdateHooks = []UserHook{}
+
+ AddUserHook(boil.AfterUpdateHook, userAfterUpdateHook)
+ if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
+ t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
+ }
+ if !reflect.DeepEqual(o, empty) {
+ t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
+ }
+ userAfterUpdateHooks = []UserHook{}
+
+ AddUserHook(boil.BeforeDeleteHook, userBeforeDeleteHook)
+ if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
+ t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
+ }
+ if !reflect.DeepEqual(o, empty) {
+ t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
+ }
+ userBeforeDeleteHooks = []UserHook{}
+
+ AddUserHook(boil.AfterDeleteHook, userAfterDeleteHook)
+ if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
+ t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
+ }
+ if !reflect.DeepEqual(o, empty) {
+ t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
+ }
+ userAfterDeleteHooks = []UserHook{}
+
+ AddUserHook(boil.BeforeUpsertHook, userBeforeUpsertHook)
+ if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
+ t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
+ }
+ if !reflect.DeepEqual(o, empty) {
+ t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
+ }
+ userBeforeUpsertHooks = []UserHook{}
+
+ AddUserHook(boil.AfterUpsertHook, userAfterUpsertHook)
+ if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
+ t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
+ }
+ if !reflect.DeepEqual(o, empty) {
+ t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
+ }
+ userAfterUpsertHooks = []UserHook{}
+}
+
+func testUsersInsert(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ count, err := Users().Count(ctx, tx)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if count != 1 {
+ t.Error("want one record, got:", count)
+ }
+}
+
+func testUsersInsertWhitelist(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
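+	// boil.Whitelist limits the INSERT to exactly the named columns; columns
+	// with database defaults are omitted here and left for Postgres to fill.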
+ if err = o.Insert(ctx, tx, boil.Whitelist(userColumnsWithoutDefault...)); err != nil {
+ t.Error(err)
+ }
+
+ count, err := Users().Count(ctx, tx)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if count != 1 {
+ t.Error("want one record, got:", count)
+ }
+}
+
+func testUsersReload(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ if err = o.Reload(ctx, tx); err != nil {
+ t.Error(err)
+ }
+}
+
+func testUsersReloadAll(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ slice := UserSlice{o}
+
+ if err = slice.ReloadAll(ctx, tx); err != nil {
+ t.Error(err)
+ }
+}
+
+func testUsersSelect(t *testing.T) {
+ t.Parallel()
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ slice, err := Users().All(ctx, tx)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(slice) != 1 {
+ t.Error("want one record, got:", len(slice))
+ }
+}
+
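+// userDBTypes maps User struct fields to their Postgres column types so that
+// randomize.Struct can generate type-appropriate values; the blank assignment
+// to bytes.MinRead only keeps the bytes import from being reported as unused.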
+var (
+ userDBTypes = map[string]string{`ID`: `integer`, `Username`: `character varying`, `Secret`: `character varying`, `Email`: `character varying`, `CreatedAt`: `timestamp with time zone`, `UpdatedAt`: `timestamp with time zone`}
+ _ = bytes.MinRead
+)
+
+func testUsersUpdate(t *testing.T) {
+ t.Parallel()
+
+	if len(userPrimaryKeyColumns) == 0 {
+ t.Skip("Skipping table with no primary key columns")
+ }
+ if len(userAllColumns) == len(userPrimaryKeyColumns) {
+ t.Skip("Skipping table with only primary key columns")
+ }
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ count, err := Users().Count(ctx, tx)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if count != 1 {
+ t.Error("want one record, got:", count)
+ }
+
+ if err = randomize.Struct(seed, o, userDBTypes, true, userPrimaryKeyColumns...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ } else if rowsAff != 1 {
+ t.Error("should only affect one row but affected", rowsAff)
+ }
+}
+
+func testUsersSliceUpdateAll(t *testing.T) {
+ t.Parallel()
+
+ if len(userAllColumns) == len(userPrimaryKeyColumns) {
+ t.Skip("Skipping table with only primary key columns")
+ }
+
+ seed := randomize.NewSeed()
+ var err error
+ o := &User{}
+ if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
+ if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
+ t.Error(err)
+ }
+
+ count, err := Users().Count(ctx, tx)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if count != 1 {
+ t.Error("want one record, got:", count)
+ }
+
+ if err = randomize.Struct(seed, o, userDBTypes, true, userPrimaryKeyColumns...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+	// Remove primary key and unique columns from what we plan to update
+ var fields []string
+ if strmangle.StringSliceMatch(userAllColumns, userPrimaryKeyColumns) {
+ fields = userAllColumns
+ } else {
+ fields = strmangle.SetComplement(
+ userAllColumns,
+ userPrimaryKeyColumns,
+ )
+ }
+
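+	// Build the update map by reflecting over the struct and matching each
+	// remaining column name against the field's `boil` struct tag.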
+ value := reflect.Indirect(reflect.ValueOf(o))
+ typ := reflect.TypeOf(o).Elem()
+ n := typ.NumField()
+
+ updateMap := M{}
+ for _, col := range fields {
+ for i := 0; i < n; i++ {
+ f := typ.Field(i)
+ if f.Tag.Get("boil") == col {
+ updateMap[col] = value.Field(i).Interface()
+ }
+ }
+ }
+
+ slice := UserSlice{o}
+ if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
+ t.Error(err)
+ } else if rowsAff != 1 {
+ t.Error("wanted one record updated but got", rowsAff)
+ }
+}
+
+func testUsersUpsert(t *testing.T) {
+ t.Parallel()
+
+ if len(userAllColumns) == len(userPrimaryKeyColumns) {
+ t.Skip("Skipping table with only primary key columns")
+ }
+
+ seed := randomize.NewSeed()
+ var err error
+ // Attempt the INSERT side of an UPSERT
+ o := User{}
+ if err = randomize.Struct(seed, &o, userDBTypes, true); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ ctx := context.Background()
+ tx := MustTx(boil.BeginTx(ctx, nil))
+ defer func() { _ = tx.Rollback() }()
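+	// For the psql dialect the generated signature is roughly
+	// Upsert(ctx, exec, updateOnConflict, conflictColumns, updateColumns, insertColumns);
+	// false with nil conflict columns means no update on conflict, exercising the insert side.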
+ if err = o.Upsert(ctx, tx, false, nil, boil.Infer(), boil.Infer()); err != nil {
+ t.Errorf("Unable to upsert User: %s", err)
+ }
+
+ count, err := Users().Count(ctx, tx)
+ if err != nil {
+ t.Error(err)
+ }
+ if count != 1 {
+ t.Error("want one record, got:", count)
+ }
+
+ // Attempt the UPDATE side of an UPSERT
+ if err = randomize.Struct(seed, &o, userDBTypes, false, userPrimaryKeyColumns...); err != nil {
+ t.Errorf("Unable to randomize User struct: %s", err)
+ }
+
+ if err = o.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); err != nil {
+ t.Errorf("Unable to upsert User: %s", err)
+ }
+
+ count, err = Users().Count(ctx, tx)
+ if err != nil {
+ t.Error(err)
+ }
+ if count != 1 {
+ t.Error("want one record, got:", count)
+ }
+}