@@ -620,6 +620,18 @@ func GetCloudbrainByJobID(jobID string) (*Cloudbrain, error) {
 	return getRepoCloudBrain(cb)
 }
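+
+// GetCloudbrainsNeededStopByUserID returns the user's cloudbrain tasks that are
+// still running or waiting, i.e. the ones that have to be stopped on sign-out.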
+func GetCloudbrainsNeededStopByUserID(userID int64) ([]*Cloudbrain, error) {
+	cloudBrains := make([]*Cloudbrain, 0)
+	err := x.Cols("job_id", "status", "type").Where("user_id=? AND (status=? OR status=?)", userID, string(JobRunning), string(JobWaiting)).Find(&cloudBrains)
+	return cloudBrains, err
+}
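+
+// GetCloudbrainsNeededStopByRepoID returns the repository's cloudbrain tasks that are
+// still running or waiting, i.e. the ones that have to be stopped when the repository is deleted.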
+func GetCloudbrainsNeededStopByRepoID(repoID int64) ([]*Cloudbrain, error) {
+	cloudBrains := make([]*Cloudbrain, 0)
+	err := x.Cols("job_id", "status", "type").Where("repo_id=? AND (status=? OR status=?)", repoID, string(JobRunning), string(JobWaiting)).Find(&cloudBrains)
+	return cloudBrains, err
+}
+
 func SetCloudbrainStatusByJobID(jobID string, status CloudbrainStatus) (err error) {
 	cb := &Cloudbrain{JobID: jobID, Status: string(status)}
 	_, err = x.Cols("status").Where("cloudbrain.job_id=?", jobID).Update(cb)
@@ -59,6 +59,9 @@ var (
 	x      *xorm.Engine
 	tables []interface{}
 
+	xStatistic      *xorm.Engine
+	tablesStatistic []interface{}
+
 	// HasEngine specifies if we have a xorm.Engine
 	HasEngine bool
 )
@@ -132,14 +135,17 @@ func init() {
 		new(RecommendOrg),
 	)
 
+	tablesStatistic = append(tablesStatistic,
+		new(FileChunk))
+
 	gonicNames := []string{"SSL", "UID"}
 	for _, name := range gonicNames {
 		names.LintGonicMapper[name] = true
 	}
 }
 
-func getEngine() (*xorm.Engine, error) {
-	connStr, err := setting.DBConnStr()
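+// getEngine builds a xorm.Engine from the given database configuration.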
+func getEngine(database *setting.DBInfo) (*xorm.Engine, error) {
+	connStr, err := setting.DBConnStr(database)
 	if err != nil {
 		return nil, err
 	}
@@ -153,14 +159,12 @@ func getEngine() (*xorm.Engine, error) {
 	}
 	engine.SetSchema(setting.Database.Schema)
-	HasEngine = true
 	return engine, nil
 }
 
 // NewTestEngine sets a new test xorm.Engine
 func NewTestEngine(x *xorm.Engine) (err error) {
-	x, err = getEngine()
+	x, err = getEngine(setting.Database)
 	if err != nil {
 		return fmt.Errorf("Connect to database: %v", err)
 	}
@@ -171,43 +175,80 @@ func NewTestEngine(x *xorm.Engine) (err error) {
 	return x.StoreEngine("InnoDB").Sync2(tables...)
 }
 
-// SetEngine sets the xorm.Engine
+// setEngine sets the xorm.Engine
+func setEngine(engine *xorm.Engine, table []interface{}, database *setting.DBInfo) (err error) {
+	engine.SetMapper(names.GonicMapper{})
+	// WARNING: for serv command, MUST remove the output to os.stdout,
+	// so use log file to instead print to stdout.
+	engine.SetLogger(NewXORMLogger(setting.Database.LogSQL))
+	engine.ShowSQL(setting.Database.LogSQL)
+	engine.SetMaxOpenConns(setting.Database.MaxOpenConns)
+	engine.SetMaxIdleConns(setting.Database.MaxIdleConns)
+	engine.SetConnMaxLifetime(setting.Database.ConnMaxLifetime)
+	engine.Sync2(table...)
+	MigrateCustom(engine)
+	return nil
+}
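+
+// SetEngine connects the package level engines: x to the main database and
+// xStatistic to the statistic database.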
 func SetEngine() (err error) {
-	x, err = getEngine()
+	x, err = getEngine(setting.Database)
 	if err != nil {
 		return fmt.Errorf("Failed to connect to database: %v", err)
 	}
+	if err = setEngine(x, tables, setting.Database); err != nil {
+		return err
+	}
+
+	xStatistic, err = getEngine(setting.DatabaseStatistic)
+	if err != nil {
+		return fmt.Errorf("Failed to connect to database: %v", err)
+	}
+	if err = setEngine(xStatistic, tablesStatistic, setting.DatabaseStatistic); err != nil {
+		return err
+	}
-	x.SetMapper(names.GonicMapper{})
-	// WARNING: for serv command, MUST remove the output to os.stdout,
-	// so use log file to instead print to stdout.
-	x.SetLogger(NewXORMLogger(setting.Database.LogSQL))
-	x.ShowSQL(setting.Database.LogSQL)
-	x.SetMaxOpenConns(setting.Database.MaxOpenConns)
-	x.SetMaxIdleConns(setting.Database.MaxIdleConns)
-	x.SetConnMaxLifetime(setting.Database.ConnMaxLifetime)
-	x.Sync2(tables...)
-	MigrateCustom(x)
 	return nil
 }
 
-// NewEngine initializes a new xorm.Engine
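+// NewEngine opens and migrates both the main and the statistic database engines.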
 func NewEngine(ctx context.Context, migrateFunc func(*xorm.Engine) error) (err error) {
-	if err = SetEngine(); err != nil {
+	x, err = getEngine(setting.Database)
+	if err != nil {
+		return fmt.Errorf("Failed to connect to database: %v", err)
+	}
+	if err = newEngine(ctx, migrateFunc, x, tables, setting.Database); err != nil {
+		return fmt.Errorf("newEngine failed: %v", err)
+	}
+
+	xStatistic, err = getEngine(setting.DatabaseStatistic)
+	if err != nil {
+		return fmt.Errorf("Failed to connect to database: %v", err)
+	}
+	if err = newEngine(ctx, migrateFunc, xStatistic, tablesStatistic, setting.DatabaseStatistic); err != nil {
+		return fmt.Errorf("newEngine statistic failed: %v", err)
+	}
+
+	HasEngine = true
+	return nil
+}
+
+// newEngine initializes a new xorm.Engine
+func newEngine(ctx context.Context, migrateFunc func(*xorm.Engine) error, engine *xorm.Engine, table []interface{}, database *setting.DBInfo) (err error) {
+	if err = setEngine(engine, table, database); err != nil {
 		return err
 	}
-	x.SetDefaultContext(ctx)
+	engine.SetDefaultContext(ctx)
-	if err = x.Ping(); err != nil {
+	if err = engine.Ping(); err != nil {
 		return err
 	}
-	if err = migrateFunc(x); err != nil {
+	if err = migrateFunc(engine); err != nil {
 		return fmt.Errorf("migrate: %v", err)
 	}
-	if err = x.StoreEngine("InnoDB").Sync2(tables...); err != nil {
+	if err = engine.StoreEngine("InnoDB").Sync2(table...); err != nil {
 		return fmt.Errorf("sync database struct error: %v", err)
 	}
@@ -257,6 +298,11 @@ func Ping() error {
 	if x != nil {
 		return x.Ping()
 	}
+
+	if xStatistic != nil {
+		return xStatistic.Ping()
+	}
+
 	return errors.New("database not configured")
 }
@@ -1424,6 +1424,12 @@ func GetAllRepositories() ([]*Repository, error) {
 	return getALLRepositories(x)
 }
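+
+// GetAllRepositoriesByFilterCols returns every repository with only the given
+// columns loaded from the database.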
+func GetAllRepositoriesByFilterCols(columns ...string) ([]*Repository, error) {
+	repos := make([]*Repository, 0, 1000)
+	return repos, x.Cols(columns...).Find(&repos)
+}
+
 func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err error) {
 	repo.LowerName = strings.ToLower(repo.Name)
@@ -0,0 +1,39 @@
+package models
+
+import "code.gitea.io/gitea/modules/git"
+func GetRepoKPIStats(repo *Repository) (*git.RepoKPIStats, error) {
+	return git.GetRepoKPIStats(repo.RepoPath())
+}
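+
+// GetAllUserKPIStats aggregates the per-repository stats from git.GetUserKPIStats
+// over all repositories, keyed by author e-mail.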
+func GetAllUserKPIStats() (map[string]*git.UserKPIStats, error) {
+	authors := make(map[string]*git.UserKPIStats)
+	repositories, err := GetAllRepositoriesByFilterCols("owner_name", "name")
+	if err != nil {
+		return nil, err
+	}
+
+	for _, repository := range repositories {
+		authorsOneRepo, err1 := git.GetUserKPIStats(repository.RepoPath())
+		if err1 != nil {
+			return nil, err1
+		}
+
+		for key, value := range authorsOneRepo {
+			if _, ok := authors[key]; !ok {
+				authors[key] = &git.UserKPIStats{
+					Name:        value.Name,
+					Email:       value.Email,
+					Commits:     0,
+					CommitLines: 0,
+				}
+			}
+			authors[key].Commits += value.Commits
+			authors[key].CommitLines += value.CommitLines
+		}
+	}
+	return authors, nil
+}
@@ -0,0 +1,258 @@
+package git
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type RepoKPIStats struct {
+	Contributors        int64
+	KeyContributors     int64
+	ContributorsAdded   int64
+	CommitsAdded        int64
+	CommitLinesModified int64
+	Authors             []*UserKPITypeStats
+}
+
+type UserKPIStats struct {
+	Name        string
+	Email       string
+	Commits     int64
+	CommitLines int64
+}
+
+type UserKPITypeStats struct {
+	UserKPIStats
+	isNewContributor bool // whether the author became a contributor within the last four months
+}
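+
+// GetRepoKPIStats collects repository level KPIs: total and key contributors,
+// plus the contributors, commits and modified lines added in the last four months.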
+func GetRepoKPIStats(repoPath string) (*RepoKPIStats, error) {
+	stats := &RepoKPIStats{}
+	contributors, err := GetContributors(repoPath)
+	if err != nil {
+		return nil, err
+	}
+
+	timeUntil := time.Now()
+	fourMonthAgo := timeUntil.AddDate(0, -4, 0)
+	recentlyContributors, err := getContributors(repoPath, fourMonthAgo)
+	newContributersDict := make(map[string]struct{})
+	if err != nil {
+		return nil, err
+	}
+
+	if contributors != nil {
+		stats.Contributors = int64(len(contributors))
+		for _, contributor := range contributors {
+			if contributor.CommitCnt >= 3 {
+				stats.KeyContributors++
+			}
+
+			if recentlyContributors != nil {
+				for _, recentlyContributor := range recentlyContributors {
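+					// If the author's total commit count equals their commit count in the last
+					// four months, all of their commits are recent, so count them as newly added.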
+					if recentlyContributor.Email == contributor.Email && recentlyContributor.CommitCnt == contributor.CommitCnt {
+						stats.ContributorsAdded++
+						newContributersDict[recentlyContributor.Email] = struct{}{}
+					}
+				}
+			}
+		}
+	}
+
+	err = setRepoKPIStats(repoPath, fourMonthAgo, stats, newContributersDict)
+	if err != nil {
+		return nil, fmt.Errorf("FillFromGit: %v", err)
+	}
+
+	return stats, nil
+}
+
+// GetUserKPIStats returns per-author commit and changed-line counts for the last day.
+func GetUserKPIStats(repoPath string) (map[string]*UserKPIStats, error) {
+	timeUntil := time.Now()
+	oneDayAgo := timeUntil.AddDate(0, 0, -1)
+	since := oneDayAgo.Format(time.RFC3339)
+
+	args := []string{"log", "--numstat", "--no-merges", "--branches=*", "--pretty=format:---%n%h%n%an%n%ae%n", "--date=iso", fmt.Sprintf("--since='%s'", since)}
+	stdout, err := NewCommand(args...).RunInDirBytes(repoPath)
+	if err != nil {
+		return nil, err
+	}
+
+	scanner := bufio.NewScanner(bytes.NewReader(stdout))
+	scanner.Split(bufio.ScanLines)
+
+	usersKPIStatses := make(map[string]*UserKPIStats)
+	var author string
+	p := 0
+	var email string
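+
+	// Each commit is printed as a block: "---", the abbreviated hash, the author
+	// name, the author e-mail, then one --numstat line ("added<TAB>deleted<TAB>path")
+	// per changed file. p tracks the line position inside the current block.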
+	for scanner.Scan() {
+		l := strings.TrimSpace(scanner.Text())
+		if l == "---" {
+			p = 1
+		} else if p == 0 {
+			continue
+		} else {
+			p++
+		}
+		if p > 4 && len(l) == 0 {
+			continue
+		}
+		switch p {
+		case 1: // Separator
+		case 2: // Commit sha-1
+		case 3: // Author
+			author = l
+		case 4: // E-mail
+			email = strings.ToLower(l)
+			if _, ok := usersKPIStatses[email]; !ok {
+				usersKPIStatses[email] = &UserKPIStats{
+					Name:        author,
+					Email:       email,
+					Commits:     0,
+					CommitLines: 0,
+				}
+			}
+			usersKPIStatses[email].Commits++
+		default: // Changed file
+			if parts := strings.Fields(l); len(parts) >= 3 {
+				if parts[0] != "-" {
+					if c, err := strconv.ParseInt(strings.TrimSpace(parts[0]), 10, 64); err == nil {
+						usersKPIStatses[email].CommitLines += c
+					}
+				}
+				if parts[1] != "-" {
+					if c, err := strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 64); err == nil {
+						usersKPIStatses[email].CommitLines += c
+					}
+				}
+			}
+		}
+	}
+
+	return usersKPIStatses, nil
+}
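+
+// setRepoKPIStats walks the commits since fromTime and fills in CommitsAdded,
+// CommitLinesModified and the per-author totals, marking authors present in
+// newContributers as new contributors.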
+func setRepoKPIStats(repoPath string, fromTime time.Time, stats *RepoKPIStats, newContributers map[string]struct{}) error {
+	since := fromTime.Format(time.RFC3339)
+	args := []string{"log", "--numstat", "--no-merges", "--branches=*", "--pretty=format:---%n%h%n%an%n%ae%n", "--date=iso", fmt.Sprintf("--since='%s'", since)}
+	stdout, err := NewCommand(args...).RunInDirBytes(repoPath)
+	if err != nil {
+		return err
+	}
+
+	scanner := bufio.NewScanner(bytes.NewReader(stdout))
+	scanner.Split(bufio.ScanLines)
+
+	authors := make(map[string]*UserKPITypeStats)
+	var author string
+	p := 0
+	var email string
+	for scanner.Scan() {
+		l := strings.TrimSpace(scanner.Text())
+		if l == "---" {
+			p = 1
+		} else if p == 0 {
+			continue
+		} else {
+			p++
+		}
+		if p > 4 && len(l) == 0 {
+			continue
+		}
+		switch p {
+		case 1: // Separator
+		case 2: // Commit sha-1
+			stats.CommitsAdded++
+		case 3: // Author
+			author = l
+		case 4: // E-mail
+			email = strings.ToLower(l)
+			if _, ok := authors[email]; !ok {
+				authors[email] = &UserKPITypeStats{
+					UserKPIStats: UserKPIStats{
+						Name:        author,
+						Email:       email,
+						Commits:     0,
+						CommitLines: 0,
+					},
+					isNewContributor: false,
+				}
+			}
+			if _, ok := newContributers[email]; ok {
+				authors[email].isNewContributor = true
+			}
+			authors[email].Commits++
+		default: // Changed file
+			if parts := strings.Fields(l); len(parts) >= 3 {
+				if parts[0] != "-" {
+					if c, err := strconv.ParseInt(strings.TrimSpace(parts[0]), 10, 64); err == nil {
+						stats.CommitLinesModified += c
+						authors[email].CommitLines += c
+					}
+				}
+				if parts[1] != "-" {
+					if c, err := strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 64); err == nil {
+						stats.CommitLinesModified += c
+						authors[email].CommitLines += c
+					}
+				}
+			}
+		}
+	}
+
+	a := make([]*UserKPITypeStats, 0, len(authors))
+	for _, v := range authors {
+		a = append(a, v)
+	}
+
+	// Sort authors descending depending on commit count
+	sort.Slice(a, func(i, j int) bool {
+		return a[i].Commits > a[j].Commits
+	})
+
+	stats.Authors = a
+
+	return nil
+}
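+
+// getContributors parses `git shortlog -sne --all --since=...`, where each row
+// looks like "12<TAB>Author Name <email@example.com>".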
+func getContributors(repoPath string, fromTime time.Time) ([]Contributor, error) {
+	since := fromTime.Format(time.RFC3339)
+	cmd := NewCommand("shortlog", "-sne", "--all", fmt.Sprintf("--since='%s'", since))
+	stdout, err := cmd.RunInDir(repoPath)
+	if err != nil {
+		return nil, err
+	}
+
+	stdout = strings.Trim(stdout, "\n")
+	contributorRows := strings.Split(stdout, "\n")
+	if len(contributorRows) > 0 {
+		contributorsInfo := make([]Contributor, len(contributorRows))
+		for i := 0; i < len(contributorRows); i++ {
+			var oneCount string = strings.Trim(contributorRows[i], " ")
+			if strings.Index(oneCount, "\t") < 0 {
+				continue
+			}
+			number := oneCount[0:strings.Index(oneCount, "\t")]
+			commitCnt, _ := strconv.Atoi(number)
+			committer := oneCount[strings.Index(oneCount, "\t")+1 : strings.LastIndex(oneCount, " ")]
+			committer = strings.Trim(committer, " ")
+			email := oneCount[strings.Index(oneCount, "<")+1 : strings.Index(oneCount, ">")]
+			contributorsInfo[i] = Contributor{
+				commitCnt, committer, email,
+			}
+		}
+		return contributorsInfo, nil
+	}
+
+	return nil, nil
+}
@@ -24,111 +24,119 @@ var (
 	EnableSQLite3 bool
 
 	// Database holds the database settings
-	Database = struct {
-		Type              string
-		Host              string
-		Name              string
-		User              string
-		Passwd            string
-		Schema            string
-		SSLMode           string
-		Path              string
-		LogSQL            bool
-		Charset           string
-		Timeout           int // seconds
-		UseSQLite3        bool
-		UseMySQL          bool
-		UseMSSQL          bool
-		UsePostgreSQL     bool
-		DBConnectRetries  int
-		DBConnectBackoff  time.Duration
-		MaxIdleConns      int
-		MaxOpenConns      int
-		ConnMaxLifetime   time.Duration
-		IterateBufferSize int
-	}{
-		Timeout: 500,
-	}
+	Database          *DBInfo
+	DatabaseStatistic *DBInfo
 )
+
+type DBInfo struct {
+	Type              string
+	Host              string
+	Name              string
+	User              string
+	Passwd            string
+	Schema            string
+	SSLMode           string
+	Path              string
+	LogSQL            bool
+	Charset           string
+	Timeout           int // seconds
+	UseSQLite3        bool
+	UseMySQL          bool
+	UseMSSQL          bool
+	UsePostgreSQL     bool
+	DBConnectRetries  int
+	DBConnectBackoff  time.Duration
+	MaxIdleConns      int
+	MaxOpenConns      int
+	ConnMaxLifetime   time.Duration
+	IterateBufferSize int
+}
 
 // GetDBTypeByName returns the database type as it is defined on XORM according to the given name
 func GetDBTypeByName(name string) string {
 	return dbTypes[name]
 }
 
-// InitDBConfig loads the database settings
-func InitDBConfig() {
-	sec := Cfg.Section("database")
-	Database.Type = sec.Key("DB_TYPE").String()
-	switch Database.Type {
+// initDBConfig loads the database settings
+func initDBConfig(section string, database *DBInfo) {
+	sec := Cfg.Section(section)
+	database.Type = sec.Key("DB_TYPE").String()
+	switch database.Type {
 	case "sqlite3":
-		Database.UseSQLite3 = true
+		database.UseSQLite3 = true
 	case "mysql":
-		Database.UseMySQL = true
+		database.UseMySQL = true
 	case "postgres":
-		Database.UsePostgreSQL = true
+		database.UsePostgreSQL = true
 	case "mssql":
-		Database.UseMSSQL = true
+		database.UseMSSQL = true
 	}
-	Database.Host = sec.Key("HOST").String()
-	Database.Name = sec.Key("NAME").String()
-	Database.User = sec.Key("USER").String()
-	if len(Database.Passwd) == 0 {
-		Database.Passwd = sec.Key("PASSWD").String()
+	database.Host = sec.Key("HOST").String()
+	database.Name = sec.Key("NAME").String()
+	database.User = sec.Key("USER").String()
+	if len(database.Passwd) == 0 {
+		database.Passwd = sec.Key("PASSWD").String()
 	}
-	Database.Schema = sec.Key("SCHEMA").String()
-	Database.SSLMode = sec.Key("SSL_MODE").MustString("disable")
-	Database.Charset = sec.Key("CHARSET").In("utf8", []string{"utf8", "utf8mb4"})
-	Database.Path = sec.Key("PATH").MustString(filepath.Join(AppDataPath, "gitea.db"))
-	Database.Timeout = sec.Key("SQLITE_TIMEOUT").MustInt(500)
-	Database.MaxIdleConns = sec.Key("MAX_IDLE_CONNS").MustInt(2)
-	if Database.UseMySQL {
-		Database.ConnMaxLifetime = sec.Key("CONN_MAX_LIFE_TIME").MustDuration(3 * time.Second)
+	database.Schema = sec.Key("SCHEMA").String()
+	database.SSLMode = sec.Key("SSL_MODE").MustString("disable")
+	database.Charset = sec.Key("CHARSET").In("utf8", []string{"utf8", "utf8mb4"})
+	database.Path = sec.Key("PATH").MustString(filepath.Join(AppDataPath, "gitea.db"))
+	database.Timeout = sec.Key("SQLITE_TIMEOUT").MustInt(500)
+	database.MaxIdleConns = sec.Key("MAX_IDLE_CONNS").MustInt(2)
+	if database.UseMySQL {
+		database.ConnMaxLifetime = sec.Key("CONN_MAX_LIFE_TIME").MustDuration(3 * time.Second)
 	} else {
-		Database.ConnMaxLifetime = sec.Key("CONN_MAX_LIFE_TIME").MustDuration(0)
+		database.ConnMaxLifetime = sec.Key("CONN_MAX_LIFE_TIME").MustDuration(0)
 	}
-	Database.MaxOpenConns = sec.Key("MAX_OPEN_CONNS").MustInt(0)
+	database.MaxOpenConns = sec.Key("MAX_OPEN_CONNS").MustInt(0)
 
-	Database.IterateBufferSize = sec.Key("ITERATE_BUFFER_SIZE").MustInt(50)
-	Database.LogSQL = sec.Key("LOG_SQL").MustBool(true)
-	Database.DBConnectRetries = sec.Key("DB_RETRIES").MustInt(10)
-	Database.DBConnectBackoff = sec.Key("DB_RETRY_BACKOFF").MustDuration(3 * time.Second)
+	database.IterateBufferSize = sec.Key("ITERATE_BUFFER_SIZE").MustInt(50)
+	database.LogSQL = sec.Key("LOG_SQL").MustBool(true)
+	database.DBConnectRetries = sec.Key("DB_RETRIES").MustInt(10)
+	database.DBConnectBackoff = sec.Key("DB_RETRY_BACKOFF").MustDuration(3 * time.Second)
+}
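+
+// InitDBConfig loads the main [database] section and the new
+// [database_statistic] section from the configuration file; both sections use
+// the same keys (DB_TYPE, HOST, NAME, USER, PASSWD, ...).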
+func InitDBConfig() {
+	Database = new(DBInfo)
+	DatabaseStatistic = new(DBInfo)
+	initDBConfig("database", Database)
+	initDBConfig("database_statistic", DatabaseStatistic)
 }
 
 // DBConnStr returns database connection string
-func DBConnStr() (string, error) {
+func DBConnStr(database *DBInfo) (string, error) {
 	connStr := ""
 	var Param = "?"
-	if strings.Contains(Database.Name, Param) {
+	if strings.Contains(database.Name, Param) {
 		Param = "&"
 	}
-	switch Database.Type {
+	switch database.Type {
 	case "mysql":
 		connType := "tcp"
-		if Database.Host[0] == '/' { // looks like a unix socket
+		if database.Host[0] == '/' { // looks like a unix socket
 			connType = "unix"
 		}
-		tls := Database.SSLMode
+		tls := database.SSLMode
 		if tls == "disable" { // allow (Postgres-inspired) default value to work in MySQL
 			tls = "false"
 		}
 		connStr = fmt.Sprintf("%s:%s@%s(%s)/%s%scharset=%s&parseTime=true&tls=%s",
-			Database.User, Database.Passwd, connType, Database.Host, Database.Name, Param, Database.Charset, tls)
+			database.User, database.Passwd, connType, database.Host, database.Name, Param, database.Charset, tls)
 	case "postgres":
-		connStr = getPostgreSQLConnectionString(Database.Host, Database.User, Database.Passwd, Database.Name, Param, Database.SSLMode)
+		connStr = getPostgreSQLConnectionString(database.Host, database.User, database.Passwd, database.Name, Param, database.SSLMode)
 	case "mssql":
-		host, port := ParseMSSQLHostPort(Database.Host)
-		connStr = fmt.Sprintf("server=%s; port=%s; database=%s; user id=%s; password=%s;", host, port, Database.Name, Database.User, Database.Passwd)
+		host, port := ParseMSSQLHostPort(database.Host)
+		connStr = fmt.Sprintf("server=%s; port=%s; database=%s; user id=%s; password=%s;", host, port, database.Name, database.User, database.Passwd)
 	case "sqlite3":
 		if !EnableSQLite3 {
 			return "", errors.New("this binary version does not build support for SQLite3")
 		}
-		if err := os.MkdirAll(path.Dir(Database.Path), os.ModePerm); err != nil {
+		if err := os.MkdirAll(path.Dir(database.Path), os.ModePerm); err != nil {
 			return "", fmt.Errorf("Failed to create directories: %v", err)
 		}
-		connStr = fmt.Sprintf("file:%s?cache=shared&mode=rwc&_busy_timeout=%d&_txlock=immediate", Database.Path, Database.Timeout)
+		connStr = fmt.Sprintf("file:%s?cache=shared&mode=rwc&_busy_timeout=%d&_txlock=immediate", database.Path, database.Timeout)
 	default:
-		return "", fmt.Errorf("Unknown database type: %s", Database.Type)
+		return "", fmt.Errorf("Unknown database type: %s", database.Type)
 	}
 
 	return connStr, nil
@@ -13,6 +13,8 @@ import (
 	"strings"
 	"time"
 
+	"code.gitea.io/gitea/modules/modelarts"
+
 	"code.gitea.io/gitea/modules/git"
 	"code.gitea.io/gitea/modules/storage"
@@ -361,6 +363,58 @@ func CloudBrainStop(ctx *context.Context) {
 	ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/cloudbrain")
 }
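+
+// StopJobsByUserID stops all of the user's cloudbrain tasks that are still running or waiting.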
+func StopJobsByUserID(userID int64) {
+	cloudBrains, err := models.GetCloudbrainsNeededStopByUserID(userID)
+	if err != nil {
+		log.Warn("Failed to get cloudBrain info", err)
+		return
+	}
+	StopJobs(cloudBrains)
+}
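+
+// StopJobsByRepoID stops all of the repository's cloudbrain tasks that are still running or waiting.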
+func StopJobsByRepoID(repoID int64) {
+	cloudBrains, err := models.GetCloudbrainsNeededStopByRepoID(repoID)
+	if err != nil {
+		log.Warn("Failed to get cloudBrain info", err)
+		return
+	}
+	StopJobs(cloudBrains)
+}
+
+// StopJobs stops each task on its backend: cloudbrain (TypeCloudBrainOne) tasks
+// via cloudbrain.StopJob, all other tasks via the ModelArts notebook stop action.
+func StopJobs(cloudBrains []*models.Cloudbrain) {
+	for _, taskInfo := range cloudBrains {
+		if taskInfo.Type == models.TypeCloudBrainOne {
+			err := cloudbrain.StopJob(taskInfo.JobID)
+			logErrorAndUpdateJobStatus(err, taskInfo)
+		} else {
+			param := models.NotebookAction{
+				Action: models.ActionStop,
+			}
+			_, err := modelarts.StopJob(taskInfo.JobID, param)
+			logErrorAndUpdateJobStatus(err, taskInfo)
+		}
+	}
+}
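+
+// logErrorAndUpdateJobStatus logs a failed stop request; when stopping succeeded
+// it marks the task as stopped in the database.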
+func logErrorAndUpdateJobStatus(err error, taskInfo *models.Cloudbrain) {
+	if err != nil {
+		log.Warn("Failed to stop cloudBrain job:"+taskInfo.JobID, err)
+	} else {
+		taskInfo.Status = string(models.JobStopped)
+		err = models.UpdateJob(taskInfo)
+		if err != nil {
+			log.Warn("UpdateJob failed", err)
+		}
+	}
+}
+
 func CloudBrainDel(ctx *context.Context) {
 	var jobID = ctx.Params(":jobid")
 	task, err := models.GetCloudbrainByJobID(jobID)
@@ -440,6 +440,7 @@ func SettingsPost(ctx *context.Context, form auth.RepoSettingForm) {
 			return
 		}
 		log.Trace("Repository deleted: %s/%s", ctx.Repo.Owner.Name, repo.Name)
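+		// Stop the repository's running or waiting cloudbrain tasks in the background.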
+		go StopJobsByRepoID(repo.ID)
 
 		ctx.Flash.Success(ctx.Tr("repo.settings.deletion_success"))
 		ctx.Redirect(ctx.Repo.Owner.DashboardLink())
@@ -11,6 +11,8 @@ import (
 	"net/http"
 	"strings"
 
+	"code.gitea.io/gitea/routers/repo"
+
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/auth"
 	"code.gitea.io/gitea/modules/auth/oauth2"
@@ -1056,6 +1058,7 @@ func SignOut(ctx *context.Context) {
 		})
 	}
 	HandleSignOut(ctx)
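+	// Stop the user's running or waiting cloudbrain tasks in the background after the session ends.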
+	go repo.StopJobsByUserID(ctx.User.ID)
 	ctx.Redirect(setting.AppSubURL + "/")
 }