diff --git a/go.mod b/go.mod
old mode 100755
new mode 100644
index 3b83aced9..7ea7d4aff
--- a/go.mod
+++ b/go.mod
@@ -51,6 +51,7 @@ require (
 	github.com/go-enry/go-enry/v2 v2.3.0
 	github.com/go-git/go-billy/v5 v5.0.0
 	github.com/go-git/go-git/v5 v5.0.0
+	github.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a
 	github.com/go-ini/ini v1.56.0 // indirect
 	github.com/go-macaron/auth v0.0.0-20161228062157-884c0e6c9b92
 	github.com/go-openapi/jsonreference v0.19.3 // indirect
@@ -61,6 +62,7 @@ require (
 	github.com/gobwas/glob v0.2.3
 	github.com/gogs/chardet v0.0.0-20191104214054-4b6791f73a28
 	github.com/gogs/cron v0.0.0-20171120032916-9f6c956d3e14
+	github.com/golang/mock v1.6.0 // indirect
 	github.com/golang/protobuf v1.4.1 // indirect
 	github.com/gomodule/redigo v2.0.0+incompatible
 	github.com/google/go-github/v24 v24.0.1
@@ -105,7 +107,6 @@ require (
 	github.com/prometheus/procfs v0.0.4 // indirect
 	github.com/quasoft/websspi v1.0.0
 	github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect
-	github.com/robfig/cron/v3 v3.0.1
 	github.com/satori/go.uuid v1.2.0
 	github.com/sergi/go-diff v1.1.0
 	github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b // indirect
@@ -125,13 +126,12 @@ require (
 	github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594
 	github.com/yuin/goldmark-meta v1.1.0
 	golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37
-	golang.org/x/mod v0.3.0 // indirect
-	golang.org/x/net v0.0.0-20200513185701-a91f0712d120
+	golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
 	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
-	golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f
-	golang.org/x/text v0.3.2
+	golang.org/x/sys v0.0.0-20210510120138-977fb7262007
+	golang.org/x/text v0.3.3
 	golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect
-	golang.org/x/tools v0.0.0-20200515220128-d3bf790afa53
+	golang.org/x/tools v0.1.1
 	google.golang.org/appengine v1.6.5 // indirect
 	gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
 	gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 // indirect
diff --git a/go.sum b/go.sum
old mode 100755
new mode 100644
index e0c11f261..b243822f3
--- a/go.sum
+++ b/go.sum
@@ -262,6 +262,8 @@ github.com/go-git/go-git-fixtures/v4 v4.0.1 h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp
 github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
 github.com/go-git/go-git/v5 v5.0.0 h1:k5RWPm4iJwYtfWoxIJy4wJX9ON7ihPeZZYC1fLYDnpg=
 github.com/go-git/go-git/v5 v5.0.0/go.mod h1:oYD8y9kWsGINPFJoLdaScGCN6dlKg23blmClfZwtUVA=
+github.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a h1:v6zMvHuY9yue4+QkG/HQ/W67wvtQmWJ4SDo9aK/GIno=
+github.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a/go.mod h1:I79BieaU4fxrw4LMXby6q5OS9XnoR9UIKLOzDFjUmuw=
 github.com/go-ini/ini v1.56.0 h1:6HjxSjqdmgnujDPhlzR4a44lxK3w03WPN8te0SoUSeM=
 github.com/go-ini/ini v1.56.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -358,7 +360,10 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er
 github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -404,8 +409,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw=
 github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=
 github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
@@ -468,7 +473,6 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
 github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -662,8 +666,6 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn
 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 h1:YDeskXpkNDhPdWN3REluVa46HQOVuVkjkd2sWnrABNQ=
 github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
-github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -711,14 +713,12 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
 github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
 github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
 github.com/smartystreets/assertions v1.1.0 h1:MkTeG1DMwsrdH7QtLXy5W+fUxWq+vmb6cLmyJ7aRtF0=
 github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
 github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
 github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
 github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
@@ -749,7 +749,6 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
@@ -804,20 +803,16 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/yohcop/openid-go v1.0.0 h1:EciJ7ZLETHR3wOtxBvKXx9RV6eyHZpCaSZ1inbBaUXE=
 github.com/yohcop/openid-go v1.0.0/go.mod h1:/408xiwkeItSPJZSTPF7+VtZxPkPrRRpRNK2vjGh6yI=
-github.com/yuin/goldmark v1.1.7/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27 h1:nqDD4MMMQA0lmWq03Z2/myGPYLQoXtmi0rGVs95ntbo=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.30 h1:j4d4Lw3zqZelDhBksEo3BnWg9xhXRQGJPPSL6OApZjI=
 github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.5/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg=
 github.com/yuin/goldmark v1.4.6/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg=
 github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594 h1:yHfZyN55+5dp1wG7wDKv8HQ044moxkyGq12KFFMFDxg=
 github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594/go.mod h1:U9ihbh+1ZN7fR5Se3daSPoz1CGF9IYtSvWwVQtnzGHU=
-github.com/yuin/goldmark-meta v0.0.0-20191126180153-f0638e958b60 h1:gZucqLjL1eDzVWrXj4uiWeMbAopJlBR2mKQAsTGdPwo=
-github.com/yuin/goldmark-meta v0.0.0-20191126180153-f0638e958b60/go.mod h1:i9VhcIHN2PxXMbQrKqXNueok6QNONoPjNMoj9MygVL0=
 github.com/yuin/goldmark-meta v1.1.0 h1:pWw+JLHGZe8Rk0EGsMVssiNb/AaPMHfSRszZeUeiOUc=
 github.com/yuin/goldmark-meta v1.1.0/go.mod h1:U4spWENafuA7Zyg+Lj5RqK/MF+ovMYtBvXi1lBb2VP0=
 github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
@@ -859,14 +854,11 @@ golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79 h1:IaQbIIB2X/Mp/DKctl6ROxz1KyMlKp4uyvL6+kQ7C88=
-golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw=
 golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a h1:gHevYm0pO4QUbwy8Dmdr01R5r1BuKtfYqRqF0h/Cbh0=
 golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U=
 golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -882,6 +874,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -913,6 +907,8 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200513185701-a91f0712d120 h1:EZ3cVSzKOlJxAd8e8YAJ7no8nNypTxexh/YE/xW3ZEY=
 golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/oauth2 v0.0.0-20180620175406-ef147856a6dd/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -929,10 +925,11 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
 golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -967,10 +964,17 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f h1:mOhmO9WsBaJCNmaZHPtHs9wOcdqdKCjF6OPJlmDM3KI=
 golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1001,10 +1005,14 @@ golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWc
 golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200515220128-d3bf790afa53 h1:vmsb6v0zUdmUlXfwKaYrHPPRCV0lHq/IwNIf0ASGjyQ=
 golang.org/x/tools v0.0.0-20200515220128-d3bf790afa53/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
 google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
 google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
@@ -1076,8 +1084,6 @@ gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ini.v1 v1.44.2/go.mod h1:M3Cogqpuv0QCi3ExAY5V4uOt4qb/R3xZubo9m8lK5wg=
 gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.52.0 h1:j+Lt/M1oPPejkniCg1TkWE2J3Eh1oZTsHSXzMTzUXn4=
-gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ini.v1 v1.56.0 h1:DPMeDvGTM54DXbPkVIZsp19fp/I2K7zwA/itHYHKo8Y=
 gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ldap.v3 v3.0.2 h1:R6RBtabK6e1GO0eQKtkyOFbAHO73QesLzI2w2DZ6b9w=
@@ -1098,7 +1104,6 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/models/ai_model_manage.go b/models/ai_model_manage.go
index a88da8fe5..d9adda2dc 100755
--- a/models/ai_model_manage.go
+++ b/models/ai_model_manage.go
@@ -88,7 +88,7 @@ type AiModelQueryOptions struct {
 }
 
 func (a *AiModelConvert) IsGpuTrainTask() bool {
-	if a.SrcEngine == 0 || a.SrcEngine == 1 {
+	if a.SrcEngine == 0 || a.SrcEngine == 1 || a.SrcEngine == 4 || a.SrcEngine == 6 {
		return true
	}
	return false
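Note: the patch widens `IsGpuTrainTask` by adding SrcEngine values 4 and 6, but all four engine IDs stay as magic numbers. A hedged sketch of how the check could read with named constants; the constant names are invented here for readability, only the numeric values 0/1/4/6 come from the patch:

```go
// Hypothetical names for the SrcEngine values treated as GPU train tasks.
// The real engine-ID mapping lives wherever SrcEngine is assigned, not here.
const (
	srcEngineGpu0 = 0 // existing GPU engine
	srcEngineGpu1 = 1 // existing GPU engine
	srcEngineGpu4 = 4 // newly counted as a GPU train task
	srcEngineGpu6 = 6 // newly counted as a GPU train task
)

func (a *AiModelConvert) IsGpuTrainTask() bool {
	switch a.SrcEngine {
	case srcEngineGpu0, srcEngineGpu1, srcEngineGpu4, srcEngineGpu6:
		return true
	}
	return false
}
```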
diff --git a/models/badge.go b/models/badge.go
new file mode 100644
index 000000000..fcfbdc27f
--- /dev/null
+++ b/models/badge.go
@@ -0,0 +1,181 @@
+package models
+
+import (
+	"code.gitea.io/gitea/modules/setting"
+	"code.gitea.io/gitea/modules/timeutil"
+	"path/filepath"
+	"strings"
+	"xorm.io/builder"
+)
+
+type Badge struct {
+	ID          int64 `xorm:"pk autoincr"`
+	Name        string
+	LightedIcon string `xorm:"varchar(2048)"`
+	GreyedIcon  string `xorm:"varchar(2048)"`
+	Url         string `xorm:"varchar(2048)"`
+	CategoryId  int64
+	CreatedUnix timeutil.TimeStamp `xorm:"created"`
+	UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+	DeletedAt   timeutil.TimeStamp `xorm:"deleted"`
+}
+
+func (m *Badge) ToUserShow() *Badge4UserShow {
+	return &Badge4UserShow{
+		Name:        m.Name,
+		LightedIcon: GetIconOuterLink(m.LightedIcon),
+		GreyedIcon:  GetIconOuterLink(m.GreyedIcon),
+		Url:         m.Url,
+	}
+}
+
+type GetBadgeOpts struct {
+	BadgeType  BadgeType
+	CategoryId int64
+	ListOpts   ListOptions
+}
+
+type BadgeAndCategory struct {
+	Badge    Badge         `xorm:"extends"`
+	Category BadgeCategory `xorm:"extends"`
+}
+
+func (*BadgeAndCategory) TableName() string {
+	return "badge"
+}
+
+func (m *BadgeAndCategory) ToShow() *Badge4AdminShow {
+	return &Badge4AdminShow{
+		ID:           m.Badge.ID,
+		Name:         m.Badge.Name,
+		LightedIcon:  GetIconOuterLink(m.Badge.LightedIcon),
+		GreyedIcon:   GetIconOuterLink(m.Badge.GreyedIcon),
+		Url:          m.Badge.Url,
+		CategoryName: m.Category.Name,
+		CategoryId:   m.Category.ID,
+		CreatedUnix:  m.Badge.CreatedUnix,
+		UpdatedUnix:  m.Badge.UpdatedUnix,
+	}
+}
+
+type Badge4AdminShow struct {
+	ID           int64
+	Name         string
+	LightedIcon  string
+	GreyedIcon   string
+	Url          string
+	CategoryName string
+	CategoryId   int64
+	CreatedUnix  timeutil.TimeStamp
+	UpdatedUnix  timeutil.TimeStamp
+}
+
+func (m Badge4AdminShow) ToDTO() Badge {
+	return Badge{
+		Name:        m.Name,
+		LightedIcon: m.LightedIcon,
+		GreyedIcon:  m.GreyedIcon,
+		Url:         m.Url,
+		CategoryId:  m.CategoryId,
+	}
+}
+
+type BadgeOperateReq struct {
+	ID          int64
+	Name        string
+	LightedIcon string
+	GreyedIcon  string
+	Url         string
+	CategoryId  int64
+}
+
+func (m BadgeOperateReq) ToDTO() Badge {
+	return Badge{
+		Name:        m.Name,
+		LightedIcon: m.LightedIcon,
+		GreyedIcon:  m.GreyedIcon,
+		Url:         m.Url,
+		CategoryId:  m.CategoryId,
+	}
+}
+
+type Badge4UserShow struct {
+	Name        string
+	LightedIcon string
+	GreyedIcon  string
+	Url         string
+}
+
+type BadgeShowWithStatus struct {
+	Badge     *Badge4UserShow
+	IsLighted bool
+}
+
+type UserAllBadgeInCategory struct {
+	CategoryName string
+	CategoryId   int64
+	LightedNum   int
+	Badges       []*BadgeShowWithStatus
+}
+
+func GetBadgeList(opts GetBadgeOpts) (int64, []*BadgeAndCategory, error) {
+	if opts.ListOpts.Page <= 0 {
+		opts.ListOpts.Page = 1
+	}
+	var cond = builder.NewCond()
+	if opts.BadgeType > 0 {
+		cond = cond.And(builder.Eq{"badge_category.type": opts.BadgeType})
+	}
+	if opts.CategoryId > 0 {
+		cond = cond.And(builder.Eq{"badge_category.id": opts.CategoryId})
+	}
+	n, err := x.Join("INNER", "badge_category", "badge_category.ID = badge.category_id").Where(cond).Count(&BadgeAndCategory{})
+	if err != nil {
+		return 0, nil, err
+	}
+	r := make([]*BadgeAndCategory, 0)
+	if err = x.Join("INNER", "badge_category", "badge_category.ID = badge.category_id").Where(cond).OrderBy("badge.created_unix desc").Limit(opts.ListOpts.PageSize, (opts.ListOpts.Page-1)*opts.ListOpts.PageSize).Find(&r); err != nil {
+		return 0, nil, err
+	}
+	return n, r, nil
+}
+
+func AddBadge(m Badge) (int64, error) {
+	return x.Insert(&m)
+}
+
+func UpdateBadgeById(id int64, param Badge) (int64, error) {
+	return x.ID(id).Update(&param)
+}
+
+func DelBadge(id int64) (int64, error) {
+	return x.ID(id).Delete(&Badge{})
+}
+
+func GetBadgeById(id int64) (*Badge, error) {
+	m := &Badge{}
+	has, err := x.ID(id).Get(m)
+	if err != nil {
+		return nil, err
+	} else if !has {
+		return nil, ErrRecordNotExist{}
+	}
+	return m, nil
+}
+
+func GetBadgeByCategoryId(categoryId int64) ([]*Badge, error) {
+	r := make([]*Badge, 0)
+	err := x.Where("category_id = ?", categoryId).Find(&r)
+	return r, err
+}
+
+func GetCustomIconByHash(hash string) string {
+	if len(hash) == 0 {
+		return ""
+	}
+	return filepath.Join(setting.IconUploadPath, hash)
+}
+
+func GetIconOuterLink(hash string) string {
+	return strings.TrimRight(setting.AppSubURL, "/") + "/show/icon/" + hash
+}
diff --git a/models/badge_category.go b/models/badge_category.go
new file mode 100644
index 000000000..069fb6b10
--- /dev/null
+++ b/models/badge_category.go
@@ -0,0 +1,94 @@
+package models
+
+import "code.gitea.io/gitea/modules/timeutil"
+
+type BadgeType int
+
+const (
+	CustomizeBadge BadgeType = iota + 1
+	SystemBadge
+)
+
+type BadgeCategory struct {
+	ID          int64 `xorm:"pk autoincr"`
+	Name        string
+	Position    int64
+	Type        BadgeType
+	CreatedUnix timeutil.TimeStamp `xorm:"created"`
+	UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+	DeletedAt   timeutil.TimeStamp `xorm:"deleted"`
+}
+
+func (m *BadgeCategory) ToShow() *BadgeCategory4Show {
+	return &BadgeCategory4Show{
+		ID:          m.ID,
+		Name:        m.Name,
+		Position:    m.Position,
+		Type:        m.Type,
+		CreatedUnix: m.CreatedUnix,
+	}
+}
+
+type BadgeCategory4Show struct {
+	ID          int64 `xorm:"pk autoincr"`
+	Name        string
+	Position    int64
+	Type        BadgeType
+	CreatedUnix timeutil.TimeStamp `xorm:"created"`
+}
+
+func (m BadgeCategory4Show) ToDTO() BadgeCategory {
+	return BadgeCategory{
+		ID:          m.ID,
+		Name:        m.Name,
+		Position:    m.Position,
+		Type:        m.Type,
+		CreatedUnix: m.CreatedUnix,
+	}
+}
+
+func GetBadgeCategoryListPaging(opts ListOptions) (int64, []*BadgeCategory, error) {
+	n, err := x.Count(&BadgeCategory{})
+	if err != nil {
+		return 0, nil, err
+	}
+	if opts.Page <= 0 {
+		opts.Page = 1
+	}
+	r := make([]*BadgeCategory, 0)
+	if err := x.OrderBy("position asc,created_unix desc").Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).Find(&r); err != nil {
+		return 0, nil, err
+	}
+	return n, r, nil
+}
+
+func GetBadgeCategoryList() ([]*BadgeCategory, error) {
+	r := make([]*BadgeCategory, 0)
+	if err := x.OrderBy("position asc,created_unix desc").Find(&r); err != nil {
+		return nil, err
+	}
+	return r, nil
+}
+
+func AddBadgeCategory(m BadgeCategory) (int64, error) {
+	return x.Insert(&m)
+}
+
+func UpdateBadgeCategoryById(id int64, param BadgeCategory) (int64, error) {
+	return x.ID(id).Update(&param)
+}
+
+func DelBadgeCategory(id int64) (int64, error) {
+	return x.ID(id).Delete(&BadgeCategory{})
+}
+
+func GetBadgeCategoryById(id int64) (*BadgeCategory, error) {
+	m := &BadgeCategory{}
+	has, err := x.ID(id).Get(m)
+	if err != nil {
+		return nil, err
+	} else if !has {
+		return nil, ErrRecordNotExist{}
+	}
+	return m, nil
+}
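Note: all three badge tables rely on xorm's `deleted` tag for soft deletion. A minimal sketch of the behavior that tag buys (illustrative only; the `Unscoped` escape hatch is the same one `CloudbrainAllStatic` uses later in this patch):

```go
// With `xorm:"deleted"`, Delete() stamps DeletedAt instead of removing the
// row, and normal queries silently filter soft-deleted rows out.
func badgeSoftDeleteDemo() error {
	if _, err := x.ID(1).Delete(&Badge{}); err != nil { // sets DeletedAt, keeps the row
		return err
	}
	b := &Badge{}
	has, err := x.ID(1).Get(b) // has == false: soft-deleted rows are invisible
	if err != nil {
		return err
	}
	if !has {
		// Unscoped() bypasses the soft-delete filter and can still read the row.
		_, err = x.ID(1).Unscoped().Get(b)
	}
	return err
}
```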
diff --git a/models/badge_user.go b/models/badge_user.go
new file mode 100644
index 000000000..9b556bc0e
--- /dev/null
+++ b/models/badge_user.go
@@ -0,0 +1,159 @@
+package models
+
+import (
+	"code.gitea.io/gitea/modules/timeutil"
+	"xorm.io/builder"
+)
+
+const (
+	ActionAddBadgeUser = 1
+	ActionDelBadgeUser = 2
+)
+
+type BadgeUser struct {
+	ID          int64              `xorm:"pk autoincr"`
+	UserId      int64              `xorm:"unique(user_badge)"`
+	BadgeId     int64              `xorm:"unique(user_badge) index"`
+	CreatedUnix timeutil.TimeStamp `xorm:"created index"`
+}
+
+type BadgeUserLog struct {
+	ID          int64              `xorm:"pk autoincr"`
+	UserId      int64              `xorm:"index"`
+	BadgeId     int64              `xorm:"index"`
+	Action      int
+	CreatedUnix timeutil.TimeStamp `xorm:"created index"`
+}
+
+type BadgeUserDetail struct {
+	BadgeUser BadgeUser `xorm:"extends"`
+	User      User      `xorm:"extends"`
+}
+
+func (*BadgeUserDetail) TableName() string {
+	return "badge_user"
+}
+
+func (m *BadgeUserDetail) ToShow() *BadgeUser4Show {
+	return &BadgeUser4Show{
+		ID:          m.BadgeUser.ID,
+		UserId:      m.BadgeUser.UserId,
+		Name:        m.User.Name,
+		Avatar:      m.User.RelAvatarLink(),
+		Email:       m.User.Email,
+		CreatedUnix: m.BadgeUser.CreatedUnix,
+	}
+}
+
+type BadgeUser4Show struct {
+	ID          int64
+	UserId      int64
+	Name        string
+	Avatar      string
+	Email       string
+	CreatedUnix timeutil.TimeStamp
+}
+
+type AddBadgeUsersReq struct {
+	BadgeId int64
+	Users   string
+}
+type DelBadgeUserReq struct {
+	ID int64
+}
+
+type GetUserBadgesOpts struct {
+	CategoryId int64
+	ListOptions
+}
+
+func AddBadgeUser(m BadgeUser) (int64, error) {
+	sess := x.NewSession()
+	defer sess.Close()
+	sess.Begin()
+	n, err := sess.Insert(&m)
+	if err != nil || n == 0 {
+		return 0, err
+	}
+	_, err = sess.Insert(&BadgeUserLog{
+		UserId:  m.UserId,
+		BadgeId: m.BadgeId,
+		Action:  ActionAddBadgeUser,
+	})
+	if err != nil {
+		sess.Rollback()
+		return 0, err
+	}
+	return n, sess.Commit()
+}
+
+func DelBadgeUser(id int64) (int64, error) {
+	m := BadgeUser{}
+	has, err := x.ID(id).Get(&m)
+	if err != nil {
+		return 0, err
+	}
+	if !has {
+		return 0, ErrRecordNotExist{}
+	}
+	sess := x.NewSession()
+	defer sess.Close()
+	sess.Begin()
+	n, err := sess.ID(m.ID).Delete(&BadgeUser{})
+	if err != nil || n == 0 {
+		return 0, err
+	}
+	_, err = sess.Insert(&BadgeUserLog{
+		UserId:  m.UserId,
+		BadgeId: m.BadgeId,
+		Action:  ActionDelBadgeUser,
+	})
+	if err != nil {
+		sess.Rollback()
+		return 0, err
+	}
+	return n, sess.Commit()
+}
+
+func GetBadgeUsers(badgeId int64, opts ListOptions) (int64, []BadgeUserDetail, error) {
+	n, err := x.Join("LEFT", "public.user", "public.user.ID = badge_user.user_id").Where("badge_user.badge_id = ?", badgeId).Count(&BadgeUserDetail{})
+	if err != nil {
+		return 0, nil, err
+	}
+	if opts.Page <= 0 {
+		opts.Page = 1
+	}
+	m := make([]BadgeUserDetail, 0)
+	err = x.Join("LEFT", "public.user", "public.user.ID = badge_user.user_id").Where("badge_user.badge_id = ?", badgeId).OrderBy("badge_user.id desc").Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).Find(&m)
+	if err != nil {
+		return 0, nil, err
+	}
+	return n, m, nil
+}
+
+func GetUserBadgesPaging(userId int64, opts GetUserBadgesOpts) ([]*Badge, error) {
+	cond := builder.NewCond()
+	cond = cond.And(builder.Eq{"badge_user.user_id": userId})
+	if opts.CategoryId > 0 {
+		cond = cond.And(builder.Eq{"badge.category_id": opts.CategoryId})
+	}
+
+	r := make([]*Badge, 0)
+	err := x.Join("INNER", "badge_user", "badge_user.badge_id = badge.id").Where(cond).OrderBy("badge_user.id desc").Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).Find(&r)
+	return r, err
+}
+func CountUserBadges(userId int64) (int64, error) {
+	return x.Where("user_id = ?", userId).Count(&BadgeUser{})
+}
+
+func GetUserBadges(userId, categoryId int64) ([]*Badge, error) {
+	cond := builder.NewCond()
+	cond = cond.And(builder.Eq{"badge_user.user_id": userId})
+	if categoryId > 0 {
+		cond = cond.And(builder.Eq{"badge.category_id": categoryId})
+	}
+
+	r := make([]*Badge, 0)
+	err := x.Join("INNER", "badge_user", "badge_user.badge_id = badge.id").Where(cond).OrderBy("badge_user.created_unix desc").Find(&r)
+	return r, err
+}
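Note: two fixes were folded into badge_user.go above: the show type is spelled `BadgeUser4Show` (the patch had `BadgeUser4SHow`), and `DelBadgeUser` now deletes through `sess` rather than `x`, so the delete actually participates in the transaction it opens. `AddBadgeUser` and `DelBadgeUser` otherwise repeat the same begin/rollback/commit dance; a hedged sketch of a helper they could share (the helper name and shape are mine, not part of the patch; `xorm.Session` comes from the already-vendored `xorm.io/xorm`):

```go
import "xorm.io/xorm"

// withTransaction runs f inside one xorm session, committing on success and
// rolling back on any error. Sketch only; error wrapping kept minimal.
func withTransaction(f func(sess *xorm.Session) error) error {
	sess := x.NewSession()
	defer sess.Close()
	if err := sess.Begin(); err != nil {
		return err
	}
	if err := f(sess); err != nil {
		sess.Rollback()
		return err
	}
	return sess.Commit()
}
```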
diff --git a/models/cloudbrain.go b/models/cloudbrain.go
index 6135dac40..dacb1b03a 100755
--- a/models/cloudbrain.go
+++ b/models/cloudbrain.go
@@ -116,6 +116,8 @@ const (
 	GrampusStatusStopped = "STOPPED"
 	GrampusStatusUnknown = "UNKNOWN"
 	GrampusStatusWaiting = "WAITING"
+
+	ModelSuffix = "models.zip"
 )
 
 const (
@@ -1070,6 +1072,12 @@ type CreateInferenceJobParams struct {
 	InfConfig   InfConfig `json:"config"`
 	WorkspaceID string    `json:"workspace_id"`
 }
+type CreateInfUserImageParams struct {
+	JobName     string             `json:"job_name"`
+	Description string             `json:"job_desc"`
+	Config      InfUserImageConfig `json:"config"`
+	WorkspaceID string             `json:"workspace_id"`
+}
 
 type InfConfig struct {
 	WorkServerNum int `json:"worker_server_num"`
@@ -1084,6 +1092,21 @@ type InfConfig struct {
 	PoolID        string      `json:"pool_id"`
 }
 
+type InfUserImageConfig struct {
+	WorkServerNum int         `json:"worker_server_num"`
+	AppUrl        string      `json:"app_url"`       // code directory of the training job
+	BootFileUrl   string      `json:"boot_file_url"` // boot file of the training job; must live under the code directory
+	Parameter     []Parameter `json:"parameter"`
+	DataUrl       string      `json:"data_url"` // OBS path URL of the dataset the training job needs
+	EngineID      int64       `json:"engine_id"`
+	LogUrl        string      `json:"log_url"`
+	CreateVersion bool        `json:"create_version"`
+	Flavor        Flavor      `json:"flavor"`
+	PoolID        string      `json:"pool_id"`
+	UserImageUrl  string      `json:"user_image_url"`
+	UserCommand   string      `json:"user_command"`
+}
+
 type CreateTrainJobVersionParams struct {
 	Description string                `json:"job_desc"`
 	Config      TrainJobVersionConfig `json:"config"`
@@ -2015,11 +2038,6 @@ func GetModelSafetyTestTask() ([]*Cloudbrain, error) {
 	return cloudbrains, err
 }
 
-func GetCloudbrainCountByUserID(userID int64, jobType string) (int, error) {
-	count, err := x.In("status", JobWaiting, JobRunning).And("job_type = ? and user_id = ? and type = ?", jobType, userID, TypeCloudBrainOne).Count(new(Cloudbrain))
-	return int(count), err
-}
-
 func GetCloudbrainRunCountByRepoID(repoID int64) (int, error) {
 	count, err := x.In("status", JobWaiting, JobRunning, ModelArtsCreateQueue, ModelArtsCreating, ModelArtsStarting, ModelArtsReadyToStart, ModelArtsResizing, ModelArtsStartQueuing, ModelArtsRunning, ModelArtsDeleting, ModelArtsRestarting, ModelArtsTrainJobInit,
@@ -2028,13 +2046,8 @@ func GetCloudbrainRunCountByRepoID(repoID int64) (int, error) {
 	return int(count), err
 }
 
-func GetBenchmarkCountByUserID(userID int64) (int, error) {
-	count, err := x.In("status", JobWaiting, JobRunning).And("(job_type = ? or job_type = ? or job_type = ?) and user_id = ? and type = ?", string(JobTypeBenchmark), string(JobTypeModelSafety), string(JobTypeBrainScore), string(JobTypeSnn4imagenet), userID, TypeCloudBrainOne).Count(new(Cloudbrain))
-	return int(count), err
-}
-
 func GetModelSafetyCountByUserID(userID int64) (int, error) {
-	count, err := x.In("status", JobWaiting, JobRunning).And("job_type = ? and user_id = ?", string(JobTypeModelSafety), userID).Count(new(Cloudbrain))
+	count, err := x.In("status", JobWaiting, JobRunning, ModelArtsTrainJobInit, ModelArtsTrainJobImageCreating, ModelArtsTrainJobSubmitTrying, ModelArtsTrainJobScaling, ModelArtsTrainJobCheckInit, ModelArtsTrainJobCheckRunning, ModelArtsTrainJobCheckRunningCompleted).And("job_type = ? and user_id = ?", string(JobTypeModelSafety), userID).Count(new(Cloudbrain))
 	return int(count), err
 }
 
@@ -2048,40 +2061,14 @@ func GetWaitingCloudbrainCount(cloudbrainType int, computeResource string, jobTy
 	}
 	return sess.Count(new(Cloudbrain))
 }
-
-func GetCloudbrainNotebookCountByUserID(userID int64) (int, error) {
-	count, err := x.In("status", ModelArtsCreateQueue, ModelArtsCreating, ModelArtsStarting, ModelArtsReadyToStart, ModelArtsResizing, ModelArtsStartQueuing, ModelArtsRunning, ModelArtsRestarting).
-		And("job_type = ? and user_id = ? and type in (?,?)", JobTypeDebug, userID, TypeCloudBrainTwo, TypeCDCenter).Count(new(Cloudbrain))
-	return int(count), err
-}
-
-func GetCloudbrainTrainJobCountByUserID(userID int64) (int, error) {
-	count, err := x.In("status", ModelArtsTrainJobInit, ModelArtsTrainJobImageCreating, ModelArtsTrainJobSubmitTrying, ModelArtsTrainJobWaiting, ModelArtsTrainJobRunning, ModelArtsTrainJobScaling, ModelArtsTrainJobCheckInit, ModelArtsTrainJobCheckRunning, ModelArtsTrainJobCheckRunningCompleted).
-		And("job_type = ? and user_id = ? and type = ?", JobTypeTrain, userID, TypeCloudBrainTwo).Count(new(Cloudbrain))
-	return int(count), err
-}
-
-func GetCloudbrainInferenceJobCountByUserID(userID int64) (int, error) {
-	count, err := x.In("status", ModelArtsTrainJobInit, ModelArtsTrainJobImageCreating, ModelArtsTrainJobSubmitTrying, ModelArtsTrainJobWaiting, ModelArtsTrainJobRunning, ModelArtsTrainJobScaling, ModelArtsTrainJobCheckInit, ModelArtsTrainJobCheckRunning, ModelArtsTrainJobCheckRunningCompleted).
-		And("job_type = ? and user_id = ? and type = ?", JobTypeInference, userID, TypeCloudBrainTwo).Count(new(Cloudbrain))
+func GetNotFinalStatusTaskCount(userID int64, notFinalStatus []string, jobTypes []JobType, cloudbrainTypes []int, computeResource string) (int, error) {
+	count, err := x.In("status", notFinalStatus).
+		In("job_type", jobTypes).
+		In("type", cloudbrainTypes).
+		And("user_id = ? and compute_resource = ?", userID, computeResource).Count(new(Cloudbrain))
 	return int(count), err
 }
 
-func GetGrampusCountByUserID(userID int64, jobType, computeResource string) (int, error) {
-	count, err := x.In("status", GrampusStatusWaiting, GrampusStatusRunning).And("job_type = ? and user_id = ? and type = ?", jobType, userID, TypeC2Net).And("compute_resource = ?", computeResource).Count(new(Cloudbrain))
-	return int(count), err
-}
-
-func UpdateInferenceJob(job *Cloudbrain) error {
-	return updateInferenceJob(x, job)
-}
-
-func updateInferenceJob(e Engine, job *Cloudbrain) error {
-	var sess *xorm.Session
-	sess = e.Where("job_id = ?", job.JobID)
-	_, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time", "created_unix").Update(job)
-	return err
-}
 func RestartCloudbrain(old *Cloudbrain, new *Cloudbrain) (err error) {
 	sess := x.NewSession()
 	defer sess.Close()
@@ -2296,9 +2283,9 @@ func CloudbrainAllStatic(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, er
 		}
 		sess.Limit(opts.PageSize, start)
 	}
-	sess.OrderBy("cloudbrain.created_unix DESC")
+	// sess.OrderBy("cloudbrain.created_unix DESC")
 	cloudbrains := make([]*CloudbrainInfo, 0, setting.UI.IssuePagingNum)
-	if err := sess.Table(&Cloudbrain{}).Unscoped().Where(cond).
+	if err := sess.Cols("status", "type", "job_type", "train_job_duration", "duration", "compute_resource", "created_unix", "start_time", "end_time", "work_server_number").Table(&Cloudbrain{}).Unscoped().Where(cond).
 		Find(&cloudbrains); err != nil {
 		return nil, 0, fmt.Errorf("Find: %v", err)
 	}
@@ -2411,97 +2398,6 @@ var (
 	CloudbrainSpecialGpuInfosMap map[string]*GpuInfo
 )
 
-func InitCloudbrainOneResourceSpecMap() {
-	if CloudbrainDebugResourceSpecsMap == nil || len(CloudbrainDebugResourceSpecsMap) == 0 {
-		t := ResourceSpecs{}
-		json.Unmarshal([]byte(setting.ResourceSpecs), &t)
-		CloudbrainDebugResourceSpecsMap = make(map[int]*ResourceSpec, len(t.ResourceSpec))
-		for _, spec := range t.ResourceSpec {
-			CloudbrainDebugResourceSpecsMap[spec.Id] = spec
-		}
-	}
-	if CloudbrainTrainResourceSpecsMap == nil || len(CloudbrainTrainResourceSpecsMap) == 0 {
-		t := ResourceSpecs{}
-		json.Unmarshal([]byte(setting.TrainResourceSpecs), &t)
-		CloudbrainTrainResourceSpecsMap = make(map[int]*ResourceSpec, len(t.ResourceSpec))
-		for _, spec := range t.ResourceSpec {
-			CloudbrainTrainResourceSpecsMap[spec.Id] = spec
-		}
-	}
-	if CloudbrainInferenceResourceSpecsMap == nil || len(CloudbrainInferenceResourceSpecsMap) == 0 {
-		t := ResourceSpecs{}
-		json.Unmarshal([]byte(setting.InferenceResourceSpecs), &t)
-		CloudbrainInferenceResourceSpecsMap = make(map[int]*ResourceSpec, len(t.ResourceSpec))
-		for _, spec := range t.ResourceSpec {
-			CloudbrainInferenceResourceSpecsMap[spec.Id] = spec
-		}
-	}
-	if CloudbrainBenchmarkResourceSpecsMap == nil || len(CloudbrainBenchmarkResourceSpecsMap) == 0 {
-		t := ResourceSpecs{}
-		json.Unmarshal([]byte(setting.BenchmarkResourceSpecs), &t)
-		CloudbrainBenchmarkResourceSpecsMap = make(map[int]*ResourceSpec, len(t.ResourceSpec))
-		for _, spec := range t.ResourceSpec {
-			CloudbrainBenchmarkResourceSpecsMap[spec.Id] = spec
-		}
-	}
-	if CloudbrainSpecialResourceSpecsMap == nil || len(CloudbrainSpecialResourceSpecsMap) == 0 {
-		t := SpecialPools{}
-		json.Unmarshal([]byte(setting.SpecialPools), &t)
-		for _, pool := range t.Pools {
-			CloudbrainSpecialResourceSpecsMap = make(map[int]*ResourceSpec, len(pool.ResourceSpec))
-			for _, spec := range pool.ResourceSpec {
-				CloudbrainSpecialResourceSpecsMap[spec.Id] = spec
-			}
-		}
-	}
-	SpecsMapInitFlag = true
-}
-
-func InitCloudbrainOneGpuInfoMap() {
-	if CloudbrainDebugGpuInfosMap == nil || len(CloudbrainDebugGpuInfosMap) == 0 {
-		t := GpuInfos{}
-		json.Unmarshal([]byte(setting.GpuTypes), &t)
-		CloudbrainDebugGpuInfosMap = make(map[string]*GpuInfo, len(t.GpuInfo))
-		for _, GpuInfo := range t.GpuInfo {
-			CloudbrainDebugGpuInfosMap[GpuInfo.Queue] = GpuInfo
-		}
-	}
-	if CloudbrainTrainGpuInfosMap == nil || len(CloudbrainTrainGpuInfosMap) == 0 {
-		t := GpuInfos{}
-		json.Unmarshal([]byte(setting.TrainGpuTypes), &t)
-		CloudbrainTrainGpuInfosMap = make(map[string]*GpuInfo, len(t.GpuInfo))
-		for _, GpuInfo := range t.GpuInfo {
-			CloudbrainTrainGpuInfosMap[GpuInfo.Queue] = GpuInfo
-		}
-	}
-	if CloudbrainInferenceGpuInfosMap == nil || len(CloudbrainInferenceGpuInfosMap) == 0 {
-		t := GpuInfos{}
-		json.Unmarshal([]byte(setting.InferenceGpuTypes), &t)
-		CloudbrainInferenceGpuInfosMap = make(map[string]*GpuInfo, len(t.GpuInfo))
-		for _, GpuInfo := range t.GpuInfo {
-			CloudbrainInferenceGpuInfosMap[GpuInfo.Queue] = GpuInfo
-		}
-	}
-	if CloudbrainBenchmarkGpuInfosMap == nil || len(CloudbrainBenchmarkGpuInfosMap) == 0 {
-		t := GpuInfos{}
-		json.Unmarshal([]byte(setting.BenchmarkGpuTypes), &t)
-		CloudbrainBenchmarkGpuInfosMap = make(map[string]*GpuInfo, len(t.GpuInfo))
-		for _, GpuInfo := range t.GpuInfo {
-			CloudbrainBenchmarkGpuInfosMap[GpuInfo.Queue] = GpuInfo
-		}
-	}
-	if CloudbrainSpecialGpuInfosMap == nil || len(CloudbrainSpecialGpuInfosMap) == 0 {
-		t := SpecialPools{}
-		json.Unmarshal([]byte(setting.SpecialPools), &t)
-		for _, pool := range t.Pools {
-			CloudbrainSpecialGpuInfosMap = make(map[string]*GpuInfo, len(pool.Pool))
-			for _, GpuInfo := range pool.Pool {
-				CloudbrainSpecialGpuInfosMap[GpuInfo.Queue] = GpuInfo
-			}
-		}
-	}
-	GpuInfosMapInitFlag = true
-}
 func GetNewestJobsByAiCenter() ([]int64, error) {
 	ids := make([]int64, 0)
 	return ids, x.
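Note: five removed per-type counters collapse into `GetNotFinalStatusTaskCount`. As an orientation aid, here is how the removed `GetCloudbrainNotebookCountByUserID` maps onto the new helper; the status/type values are taken from the removed function, while the `computeResource` argument is new in the consolidated signature, so its value here is an assumption the real call sites must supply:

```go
// notebookTaskCount mirrors the removed GetCloudbrainNotebookCountByUserID.
// string(...) casts keep this compiling whether the status constants are
// plain strings or a named string type.
func notebookTaskCount(userID int64) (int, error) {
	return GetNotFinalStatusTaskCount(userID,
		[]string{string(ModelArtsCreateQueue), string(ModelArtsCreating),
			string(ModelArtsStarting), string(ModelArtsReadyToStart),
			string(ModelArtsResizing), string(ModelArtsStartQueuing),
			string(ModelArtsRunning), string(ModelArtsRestarting)},
		[]JobType{JobTypeDebug},
		[]int{TypeCloudBrainTwo, TypeCDCenter},
		"NPU") // assumed value; the old counter had no compute_resource filter
}
```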
diff --git a/models/cloudbrain_spec.go b/models/cloudbrain_spec.go
index c32e4b0fd..49a4d603e 100644
--- a/models/cloudbrain_spec.go
+++ b/models/cloudbrain_spec.go
@@ -9,7 +9,7 @@ type CloudbrainSpec struct {
 	SpecId       int64 `xorm:"index"`
 	SourceSpecId string
 	AccCardsNum  int
-	AccCardType  string
+	AccCardType  string `xorm:"index"`
 	CpuCores     int
 	MemGiB       float32
 	GPUMemGiB    float32
@@ -19,7 +19,7 @@ type CloudbrainSpec struct {
 	QueueId      int64
 	QueueCode    string
 	Cluster      string
-	AiCenterCode string
+	AiCenterCode string `xorm:"index"`
 	AiCenterName string
 	IsExclusive  bool
 	ExclusiveOrg string
diff --git a/models/cloudbrain_static.go b/models/cloudbrain_static.go
index 48df111a0..19e55fb6d 100644
--- a/models/cloudbrain_static.go
+++ b/models/cloudbrain_static.go
@@ -1,6 +1,7 @@
 package models
 
 import (
+	"fmt"
 	"strconv"
 	"time"
 
@@ -38,6 +39,60 @@ type TaskDetail struct {
 	Spec *Specification `json:"Spec"`
 }
 
+type CloudbrainDurationStatistic struct {
+	ID              int64 `xorm:"pk autoincr"`
+	Cluster         string
+	AiCenterCode    string
+	AiCenterName    string
+	ComputeResource string
+	AccCardType     string
+
+	DateTime           string
+	DayTime            string
+	HourTime           int
+	CardsUseDuration   int
+	CardsTotalDuration int
+	CardsTotalNum      int
+
+	DeletedUnix timeutil.TimeStamp `xorm:"deleted"`
+	CreatedUnix timeutil.TimeStamp `xorm:"created"`
+	UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+}
+type DurationStatisticOptions struct {
+	BeginTime    time.Time
+	EndTime      time.Time
+	AiCenterCode string
+}
+
+type DurationRateStatistic struct {
+	AiCenterTotalDurationStat map[string]int     `json:"aiCenterTotalDurationStat"`
+	AiCenterUsageDurationStat map[string]int     `json:"aiCenterUsageDurationStat"`
+	UsageRate                 map[string]float64 `json:"UsageRate"`
+}
+type ResourceDetail struct {
+	QueueCode       string
+	Cluster         string `xorm:"notnull"`
+	AiCenterCode    string
+	AiCenterName    string
+	ComputeResource string
+	AccCardType     string
+	CardsTotalNum   int
+	IsAutomaticSync bool
+}
+
+type DateUsageStatistic struct {
+	Date          string  `json:"date"`
+	UsageDuration int     `json:"usageDuration"`
+	TotalDuration int     `json:"totalDuration"`
+	UsageRate     float64 `json:"usageRate"`
+}
+
+type HourTimeStatistic struct {
+	HourTimeUsageDuration map[string]int     `json:"hourTimeUsageDuration"`
+	HourTimeTotalDuration map[string]int     `json:"hourTimeTotalDuration"`
+	HourTimeUsageRate     map[string]float64 `json:"hourTimeUsageRate"`
+}
+
 func GetTodayCreatorCount(beginTime time.Time, endTime time.Time) (int64, error) {
 	countSql := "SELECT count(distinct user_id) FROM " +
 		"public.cloudbrain where created_unix >=" + strconv.FormatInt(beginTime.Unix(), 10) +
@@ -199,3 +254,121 @@ func GetRunHourPeriodCount(dateBeginTime string, dateEndTime string) (map[string
 	}
 	return dateHourMap, nil
 }
+
+func GetCloudbrainRunning() ([]*CloudbrainInfo, error) {
+	sess := x.NewSession()
+	defer sess.Close()
+	var cond = builder.NewCond()
+	cond = cond.And(
+		builder.Eq{"cloudbrain.status": string(JobRunning)},
+	)
+	sess.OrderBy("cloudbrain.created_unix ASC")
+	cloudbrains := make([]*CloudbrainInfo, 0, 10)
+	if err := sess.Table(&Cloudbrain{}).Where(cond).
+		Find(&cloudbrains); err != nil {
+		log.Error("find error: %v", err)
+	}
+	return cloudbrains, nil
+}
+
+func GetCloudbrainByTime(beginTime int64, endTime int64) ([]*CloudbrainInfo, error) {
+	sess := x.NewSession()
+	defer sess.Close()
+	var cond = builder.NewCond()
+	cond = cond.And(
+		builder.And(builder.Gte{"cloudbrain.end_time": beginTime}, builder.Lte{"cloudbrain.end_time": endTime}),
+	)
+	cond = cond.Or(
+		builder.Eq{"cloudbrain.status": string(JobRunning)},
+	)
+	sess.OrderBy("cloudbrain.created_unix ASC")
+	cloudbrains := make([]*CloudbrainInfo, 0, 10)
+	if err := sess.Table(&Cloudbrain{}).Unscoped().Where(cond).
+		Find(&cloudbrains); err != nil {
+		log.Error("find error: %v", err)
+	}
+	return cloudbrains, nil
+}
+
+func GetSpecByAiCenterCodeAndType(aiCenterCode string, accCardType string) ([]*CloudbrainSpec, error) {
+	sess := x.NewSession()
+	defer sess.Close()
+	var cond = builder.NewCond()
+	cond = cond.And(
+		builder.And(builder.Eq{"cloudbrain_spec.ai_center_code": aiCenterCode}, builder.Eq{"cloudbrain_spec.acc_card_type": accCardType}),
+	)
+	cloudbrainSpecs := make([]*CloudbrainSpec, 0, 10)
+	if err := sess.Table(&CloudbrainSpec{}).Where(cond).
+		Find(&cloudbrainSpecs); err != nil {
+		log.Error("find error: %v", err)
+	}
+	return cloudbrainSpecs, nil
+}
+
+func InsertCloudbrainDurationStatistic(cloudbrainDurationStatistic *CloudbrainDurationStatistic) (int64, error) {
+	return xStatistic.Insert(cloudbrainDurationStatistic)
+}
+
+func DeleteCloudbrainDurationStatisticHour(date string, hour int, aiCenterCode string, accCardType string) error {
+	sess := xStatistic.NewSession()
+	defer sess.Close()
+	if err := sess.Begin(); err != nil {
+		return fmt.Errorf("Begin: %v", err)
+	}
+
+	if _, err := sess.Where("day_time = ? AND hour_time = ? AND ai_center_code = ? AND acc_card_type = ?", date, hour, aiCenterCode, accCardType).Delete(&CloudbrainDurationStatistic{}); err != nil {
+		return fmt.Errorf("Delete: %v", err)
+	}
+
+	if err := sess.Commit(); err != nil {
+		return fmt.Errorf("Commit: %v", err)
+	}
+
+	return nil
+}
+
+func GetCanUseCardInfo() ([]*ResourceQueue, error) {
+	sess := x.NewSession()
+	defer sess.Close()
+	sess.OrderBy("resource_queue.id ASC")
+	ResourceQueues := make([]*ResourceQueue, 0, 10)
+	if err := sess.Table(&ResourceQueue{}).Find(&ResourceQueues); err != nil {
+		log.Error("find error: %v", err)
+	}
+	return ResourceQueues, nil
+}
+
+func GetCardDurationStatistics(opts *DurationStatisticOptions) ([]*CloudbrainDurationStatistic, error) {
+	sess := xStatistic.NewSession()
+	defer sess.Close()
+	var cond = builder.NewCond()
+	if opts.BeginTime.Unix() > 0 && opts.EndTime.Unix() > 0 {
+		cond = cond.And(
+			builder.And(builder.Gte{"cloudbrain_duration_statistic.created_unix": opts.BeginTime.Unix()}, builder.Lte{"cloudbrain_duration_statistic.created_unix": opts.EndTime.Unix()}),
+		)
+	}
+	if opts.AiCenterCode != "" {
+		cond = cond.And(
+			builder.Eq{"cloudbrain_duration_statistic.ai_center_code": opts.AiCenterCode},
+		)
+	}
+	CloudbrainDurationStatistics := make([]*CloudbrainDurationStatistic, 0, 10)
+	if err := sess.Table(&CloudbrainDurationStatistic{}).Where(cond).
+		Find(&CloudbrainDurationStatistics); err != nil {
+		log.Error("find error: %v", err)
+	}
+	return CloudbrainDurationStatistics, nil
+}
+
+func GetDurationRecordBeginTime() ([]*CloudbrainDurationStatistic, error) {
+	sess := xStatistic.NewSession()
+	defer sess.Close()
+	sess.OrderBy("cloudbrain_duration_statistic.id ASC").Limit(1)
+	CloudbrainDurationStatistics := make([]*CloudbrainDurationStatistic, 0)
+	if err := sess.Table(&CloudbrainDurationStatistic{}).Find(&CloudbrainDurationStatistics); err != nil {
+		log.Error("find error: %v", err)
+	}
+	return CloudbrainDurationStatistics, nil
+}
diff --git a/models/models.go b/models/models.go
index 4c2079cd8..a4ec43f43 100755
--- a/models/models.go
+++ b/models/models.go
@@ -161,6 +161,11 @@ func init() {
 		new(CloudbrainSpec),
 		new(CloudbrainTemp),
 		new(DatasetReference),
+		new(ScheduleRecord),
+		new(BadgeCategory),
+		new(Badge),
+		new(BadgeUser),
+		new(BadgeUserLog),
 	)
 
 	tablesStatistic = append(tablesStatistic,
@@ -179,6 +184,7 @@ func init() {
 		new(UserMetrics),
 		new(UserAnalysisPara),
 		new(Invitation),
+		new(CloudbrainDurationStatistic),
 	)
 
 	gonicNames := []string{"SSL", "UID"}
diff --git a/models/repo.go b/models/repo.go
index 2c4fda39b..6009c776f 100755
--- a/models/repo.go
+++ b/models/repo.go
@@ -454,6 +454,7 @@ func (repo *Repository) innerAPIFormat(e Engine, mode AccessMode, isParent bool)
 		AllowRebaseMerge: allowRebaseMerge,
 		AllowSquash:      allowSquash,
 		AvatarURL:        repo.avatarLink(e),
+		Status:           int(repo.Status),
 	}
 }
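Note: the hourly `CloudbrainDurationStatistic` rows added above are the raw material that `DurationRateStatistic`/`DateUsageStatistic` summarize. A hedged sketch of the per-day arithmetic those shapes suggest; the grouping logic is my reading of the fields, not code from this patch:

```go
// usageRateByDay folds hourly duration rows into per-day usage rates.
// Division by zero is guarded; input ordering is preserved per first sight.
func usageRateByDay(rows []*CloudbrainDurationStatistic) []DateUsageStatistic {
	byDay := map[string]*DateUsageStatistic{}
	order := []string{}
	for _, r := range rows {
		d, ok := byDay[r.DayTime]
		if !ok {
			d = &DateUsageStatistic{Date: r.DayTime}
			byDay[r.DayTime] = d
			order = append(order, r.DayTime)
		}
		d.UsageDuration += r.CardsUseDuration
		d.TotalDuration += r.CardsTotalDuration
	}
	out := make([]DateUsageStatistic, 0, len(order))
	for _, day := range order {
		d := byDay[day]
		if d.TotalDuration > 0 {
			d.UsageRate = float64(d.UsageDuration) / float64(d.TotalDuration)
		}
		out = append(out, *d)
	}
	return out
}
```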
diff --git a/models/reward_operate_record.go b/models/reward_operate_record.go
index f201be646..d9114d166 100644
--- a/models/reward_operate_record.go
+++ b/models/reward_operate_record.go
@@ -249,22 +249,23 @@ type AdminRewardOperateReq struct {
 }
 
 type RewardOperateRecordShow struct {
-	SerialNo        string
-	Status          string
-	OperateType     string
-	SourceId        string
-	Amount          int64
-	LossAmount      int64
-	BalanceAfter    int64
-	Remark          string
-	SourceType      string
-	UserName        string
-	LastOperateDate timeutil.TimeStamp
-	UnitPrice       int64
-	SuccessCount    int
-	Action          *ActionShow
-	Cloudbrain      *CloudbrainShow
-	AdminLog        *RewardAdminLogShow
+	SerialNo         string
+	Status           string
+	OperateType      string
+	SourceId         string
+	Amount           int64
+	LossAmount       int64
+	BalanceAfter     int64
+	Remark           string
+	SourceType       string
+	SourceTemplateId string
+	UserName         string
+	LastOperateDate  timeutil.TimeStamp
+	UnitPrice        int64
+	SuccessCount     int
+	Action           *ActionShow
+	Cloudbrain       *CloudbrainShow
+	AdminLog         *RewardAdminLogShow
 }
 
 func getPointOperateRecord(tl *RewardOperateRecord) (*RewardOperateRecord, error) {
@@ -419,7 +420,7 @@ func GetRewardRecordShowList(opts *RewardRecordListOpts) (RewardRecordShowList,
 	r := make([]*RewardOperateRecordShow, 0)
 	err = x.Table("reward_operate_record").Cols("reward_operate_record.source_id", "reward_operate_record.serial_no",
 		"reward_operate_record.status", "reward_operate_record.operate_type", "reward_operate_record.amount",
-		"reward_operate_record.loss_amount", "reward_operate_record.remark", "reward_operate_record.source_type",
+		"reward_operate_record.loss_amount", "reward_operate_record.remark", "reward_operate_record.source_type", "reward_operate_record.source_template_id",
 		"reward_operate_record.last_operate_unix as last_operate_date").
 		Where(cond).Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).OrderBy(string(opts.OrderBy)).Find(&r)
@@ -441,7 +442,7 @@ func GetAdminRewardRecordShowList(opts *RewardRecordListOpts) (RewardRecordShowL
 	case OperateTypeIncrease:
 		err = x.Table("reward_operate_record").Cols("reward_operate_record.source_id", "reward_operate_record.serial_no",
 			"reward_operate_record.status", "reward_operate_record.operate_type", "reward_operate_record.amount",
-			"reward_operate_record.loss_amount", "reward_operate_record.remark", "reward_operate_record.source_type",
+			"reward_operate_record.loss_amount", "reward_operate_record.remark", "reward_operate_record.source_type", "reward_operate_record.source_template_id",
 			"reward_operate_record.last_operate_unix as last_operate_date", "public.user.name as user_name",
 			"point_account_log.balance_after").
 			Join("LEFT", "public.user", "reward_operate_record.user_id = public.user.id").
@@ -450,7 +451,7 @@ func GetAdminRewardRecordShowList(opts *RewardRecordListOpts) (RewardRecordShowL
 	case OperateTypeDecrease:
 		err = x.Table("reward_operate_record").Cols("reward_operate_record.source_id", "reward_operate_record.serial_no",
 			"reward_operate_record.status", "reward_operate_record.operate_type", "reward_operate_record.amount",
-			"reward_operate_record.loss_amount", "reward_operate_record.remark", "reward_operate_record.source_type",
+			"reward_operate_record.loss_amount", "reward_operate_record.remark", "reward_operate_record.source_type", "reward_operate_record.source_template_id",
 			"reward_operate_record.last_operate_unix as last_operate_date", "public.user.name as user_name",
 			"reward_periodic_task.amount as unit_price", "reward_periodic_task.success_count").
 			Join("LEFT", "public.user", "reward_operate_record.user_id = public.user.id").
diff --git a/models/schedule_record.go b/models/schedule_record.go
new file mode 100755
index 000000000..17963abb5
--- /dev/null
+++ b/models/schedule_record.go
@@ -0,0 +1,70 @@
+package models
+
+import (
+	"fmt"
+	"time"
+
+	"code.gitea.io/gitea/modules/timeutil"
+)
+
+const (
+	StorageScheduleSucceed int = iota
+	StorageScheduleProcessing
+	StorageScheduleFailed
+	StorageNoFile
+	StorageScheduleWaiting
+)
+
+type ScheduleRecord struct {
+	ID           int64              `xorm:"pk autoincr"`
+	CloudbrainID int64              `xorm:"INDEX NOT NULL unique"`
+	EndPoint     string             `xorm:"INDEX NOT NULL"`
+	Bucket       string             `xorm:"INDEX NOT NULL"`
+	ObjectKey    string             `xorm:"INDEX NOT NULL"`
+	ProxyServer  string             `xorm:"INDEX NOT NULL"`
+	Status       int                `xorm:"INDEX NOT NULL DEFAULT 0"`
+	CreatedUnix  timeutil.TimeStamp `xorm:"created"`
+	UpdatedUnix  timeutil.TimeStamp `xorm:"updated"`
+	DeletedAt    time.Time          `xorm:"deleted"`
+}
+
+func updateScheduleCols(e Engine, record *ScheduleRecord, cols ...string) error {
+	_, err := e.ID(record.ID).Cols(cols...).Update(record)
+	return err
+}
+
+func UpdateScheduleCols(record *ScheduleRecord, cols ...string) error {
+	return updateScheduleCols(x, record, cols...)
+}
+
+func GetSchedulingRecord() ([]*ScheduleRecord, error) {
+	records := make([]*ScheduleRecord, 0, 10)
+	return records, x.
+		Where("status = ?", StorageScheduleProcessing).
+		Limit(100).
+		Find(&records)
+}
+
+func InsertScheduleRecord(record *ScheduleRecord) (_ *ScheduleRecord, err error) {
+
+	if _, err := x.Insert(record); err != nil {
+		return nil, err
+	}
+
+	return record, nil
+}
+
+func getScheduleRecordByCloudbrainID(e Engine, cloudbrainId int64) (*ScheduleRecord, error) {
+	record := new(ScheduleRecord)
+	has, err := e.Where("cloudbrain_id = ?", cloudbrainId).Get(record)
+	if err != nil {
+		return nil, err
+	} else if !has {
+		return nil, fmt.Errorf("get record by cloudbrain_id failed(%d)", cloudbrainId)
+	}
+	return record, nil
+}
+
+func GetScheduleRecordByCloudbrainID(cloudbrainId int64) (*ScheduleRecord, error) {
+	return getScheduleRecordByCloudbrainID(x, cloudbrainId)
+}
diff --git a/models/user.go b/models/user.go
index f40eb699f..b21858e37 100755
--- a/models/user.go
+++ b/models/user.go
@@ -2184,3 +2184,24 @@ func GetBlockChainUnSuccessUsers() ([]*User, error) {
 		Find(&users)
 	return users, err
 }
+
+// GetUserIdsByUserNames looks up user IDs for the given names in pages of 200; query errors are ignored and the affected batch is skipped
+func GetUserIdsByUserNames(names []string) []int64 {
+	pageSize := 200
+	length := len(names)
+	r := make([]int64, 0, length)
+	for i := 0; i < length; i = i + pageSize {
+		if length-i < 200 {
+			pageSize = length - i
+		}
+		userNameTemp := names[i : i+pageSize]
+		t := make([]int64, 0, length)
+		err := x.Table("public.user").Cols("id").In("name", userNameTemp).Find(&t)
+		if err != nil {
+			continue
+		}
+		r = append(r, t...)
+
+	}
+	return r
+}
diff --git a/modules/aisafety/resty.go b/modules/aisafety/resty.go
index be6468529..ce1fa736e 100644
--- a/modules/aisafety/resty.go
+++ b/modules/aisafety/resty.go
@@ -10,6 +10,7 @@ import (
 	"strings"
 
 	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/setting"
 
 	"github.com/go-resty/resty/v2"
 )
@@ -71,8 +72,8 @@ func checkSetting() {
 }
 
 func loginCloudbrain() error {
-	HOST = "http://221.122.70.196:8081/atp-api"
-	KEY = "1"
+	HOST = setting.ModelSafetyTest.HOST
+	KEY = setting.ModelSafetyTest.KEY
 	return nil
 }
diff --git a/modules/convert/convert.go b/modules/convert/convert.go
index a542fe78b..9eb2c519d 100755
--- a/modules/convert/convert.go
+++ b/modules/convert/convert.go
@@ -311,6 +311,7 @@ func ToOrganization(org *models.User) *api.Organization {
 		Location:                  org.Location,
 		Visibility:                org.Visibility.String(),
 		RepoAdminChangeTeamAccess: org.RepoAdminChangeTeamAccess,
+		NumRepos:                  org.NumRepos,
 	}
 }
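Note: `GetUserIdsByUserNames` pages its `IN` clause in blocks of 200 so the SQL parameter list stays bounded. The same slicing pattern, isolated; this generic helper is my own illustration, not code from the patch:

```go
// chunkStrings splits names into batches of at most size, mirroring the
// slicing GetUserIdsByUserNames performs before each IN query.
func chunkStrings(names []string, size int) [][]string {
	var out [][]string
	for i := 0; i < len(names); i += size {
		end := i + size
		if end > len(names) {
			end = len(names)
		}
		out = append(out, names[i:end])
	}
	return out
}
```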
registerUpdateMirrorTask() registerRepoHealthCheck() @@ -293,4 +316,7 @@ func initBasicTasks() { registerCloudbrainPointDeductTask() registerHandleModelSafetyTask() + + registerHandleScheduleRecord() + registerHandleCloudbrainDurationStatistic() } diff --git a/modules/grampus/grampus.go b/modules/grampus/grampus.go index 7bacb46d3..35ea815b5 100755 --- a/modules/grampus/grampus.go +++ b/modules/grampus/grampus.go @@ -1,16 +1,15 @@ package grampus import ( - "code.gitea.io/gitea/modules/cloudbrain" "encoding/json" "strings" - "code.gitea.io/gitea/modules/setting" - "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/cloudbrain" "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/notification" + "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/timeutil" ) @@ -20,10 +19,15 @@ const ( ProcessorTypeNPU = "npu.huawei.com/NPU" ProcessorTypeGPU = "nvidia.com/gpu" - GpuWorkDir = "/tmp/" - NpuWorkDir = "/cache/" + GpuWorkDir = "/tmp/" + NpuWorkDir = "/cache/" + NpuLocalLogUrl = "/tmp/train.log" + CommandPrepareScriptNpu = ";mkdir -p output;mkdir -p code;mkdir -p dataset;mkdir -p pretrainmodel;" CodeArchiveName = "master.zip" + + BucketRemote = "grampus" + RemoteModelPath = "/output/" + models.ModelSuffix ) var ( @@ -33,7 +37,7 @@ var ( SpecialPools *models.SpecialPools - CommandPrepareScript = ";mkdir -p output;mkdir -p code;mkdir -p dataset;mkdir -p pretrainmodel;echo \"start loading script\";wget -q https://git.openi.org.cn/OpenIOSSG/%s/archive/master.zip;" + + CommandPrepareScriptGpu = ";mkdir -p output;mkdir -p code;mkdir -p dataset;mkdir -p pretrainmodel;echo \"start loading script\";wget -q https://git.openi.org.cn/OpenIOSSG/%s/archive/master.zip;" + "echo \"finish loading script\";unzip -q master.zip;cd %s;chmod 777 downloader_for_obs uploader_for_npu downloader_for_minio uploader_for_gpu;" ) @@ -273,3 +277,35 @@ func InitSpecialPool() { json.Unmarshal([]byte(setting.Grampus.SpecialPools), &SpecialPools) } } + +func GetNpuModelRemoteObsUrl(jobName string) string { + return "s3:///" + BucketRemote + "/" + GetNpuModelObjectKey(jobName) +} + +func GetNpuModelObjectKey(jobName string) string { + return setting.CodePathPrefix + jobName + RemoteModelPath +} + +func GetRemoteEndPoint(aiCenterID string) string { + var endPoint string + for _, info := range setting.CenterInfos.Info { + if info.CenterID == aiCenterID { + endPoint = info.Endpoint + break + } + } + + return endPoint +} + +func GetCenterProxy(aiCenterID string) string { + var proxy string + for _, info := range setting.CenterInfos.Info { + if info.CenterID == aiCenterID { + proxy = info.StorageProxyServer + break + } + } + + return proxy +} diff --git a/modules/modelarts/modelarts.go b/modules/modelarts/modelarts.go index 06521993e..567f6d620 100755 --- a/modules/modelarts/modelarts.go +++ b/modules/modelarts/modelarts.go @@ -143,6 +143,8 @@ type GenerateInferenceJobReq struct { Spec *models.Specification DatasetName string JobType string + UserImageUrl string + UserCommand string } type VersionInfo struct { @@ -682,26 +684,51 @@ func GetOutputPathByCount(TotalVersionCount int) (VersionOutputPath string) { func GenerateInferenceJob(ctx *context.Context, req *GenerateInferenceJobReq) (err error) { createTime := timeutil.TimeStampNow() - jobResult, err := createInferenceJob(models.CreateInferenceJobParams{ - JobName: req.JobName, - Description: req.Description, - InfConfig: models.InfConfig{ - WorkServerNum: req.WorkServerNumber, - AppUrl: req.CodeObsPath, - 
BootFileUrl:   req.BootFileUrl,
-			DataUrl:       req.DataUrl,
-			EngineID:      req.EngineID,
-			// TrainUrl: req.TrainUrl,
-			LogUrl:        req.LogUrl,
-			PoolID:        req.PoolID,
-			CreateVersion: true,
-			Flavor: models.Flavor{
-				Code: req.Spec.SourceSpecId,
+	var jobResult *models.CreateTrainJobResult
+	var createErr error
+	if req.EngineID < 0 {
+		jobResult, createErr = createInferenceJobUserImage(models.CreateInfUserImageParams{
+			JobName:     req.JobName,
+			Description: req.Description,
+			Config: models.InfUserImageConfig{
+				WorkServerNum: req.WorkServerNumber,
+				AppUrl:        req.CodeObsPath,
+				BootFileUrl:   req.BootFileUrl,
+				DataUrl:       req.DataUrl,
+				// TrainUrl: req.TrainUrl,
+				LogUrl:        req.LogUrl,
+				PoolID:        req.PoolID,
+				CreateVersion: true,
+				Flavor: models.Flavor{
+					Code: req.Spec.SourceSpecId,
+				},
+				Parameter:    req.Parameters,
+				UserImageUrl: req.UserImageUrl,
+				UserCommand:  req.UserCommand,
+			},
-			Parameter: req.Parameters,
-		},
-	})
-	if err != nil {
+		})
+	} else {
+		jobResult, createErr = createInferenceJob(models.CreateInferenceJobParams{
+			JobName:     req.JobName,
+			Description: req.Description,
+			InfConfig: models.InfConfig{
+				WorkServerNum: req.WorkServerNumber,
+				AppUrl:        req.CodeObsPath,
+				BootFileUrl:   req.BootFileUrl,
+				DataUrl:       req.DataUrl,
+				EngineID:      req.EngineID,
+				// TrainUrl: req.TrainUrl,
+				LogUrl:        req.LogUrl,
+				PoolID:        req.PoolID,
+				CreateVersion: true,
+				Flavor: models.Flavor{
+					Code: req.Spec.SourceSpecId,
+				},
+				Parameter: req.Parameters,
+			},
+		})
+	}
+	if createErr != nil {
+		err = createErr
 		log.Error("createInferenceJob failed: %v", err.Error())
 		if strings.HasPrefix(err.Error(), UnknownErrorPrefix) {
 			log.Info("(%s)unknown error, set temp status", req.DisplayJobName)
diff --git a/modules/modelarts/resty.go b/modules/modelarts/resty.go
index fd1c467f3..c38300606 100755
--- a/modules/modelarts/resty.go
+++ b/modules/modelarts/resty.go
@@ -1197,6 +1197,66 @@ sendjob:
 	return &result, nil
 }
 
+func createInferenceJobUserImage(createJobParams models.CreateInfUserImageParams) (*models.CreateTrainJobResult, error) {
+	checkSetting()
+	client := getRestyClient()
+	var result models.CreateTrainJobResult
+
+	retry := 0
+
+sendjob:
+	res, err := client.R().
+		SetHeader("Content-Type", "application/json").
+		SetAuthToken(TOKEN).
+		SetBody(createJobParams).
+		SetResult(&result).
+		Post(HOST + "/v1/" + setting.ProjectID + urlTrainJob)
+
+	if err != nil {
+		return nil, fmt.Errorf("resty create train-job: %s", err)
+	}
+
+	req, _ := json.Marshal(createJobParams)
+	log.Info("%s", req)
+
+	if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
+		retry++
+		_ = getToken()
+		goto sendjob
+	}
+
+	if res.StatusCode() != http.StatusOK {
+		var temp models.ErrorResult
+		if err = json.Unmarshal([]byte(res.String()), &temp); err != nil {
+			log.Error("json.Unmarshal failed(%s): %v", res.String(), err.Error())
+			return &result, fmt.Errorf("json.Unmarshal failed(%s): %v", res.String(), err.Error())
+		}
+		log.Error("createInferenceJobUserImage failed(%d):%s(%s)", res.StatusCode(), temp.ErrorCode, temp.ErrorMsg)
+		bootFileErrorMsg := "Invalid OBS path '" + createJobParams.Config.BootFileUrl + "'."
+		dataSetErrorMsg := "Invalid OBS path '" + createJobParams.Config.DataUrl + "'."
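+		// ModelArts reports a bad boot file and a bad dataset with the same generic "Invalid OBS path" text, so match the message verbatim to surface a more specific cause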
+ if temp.ErrorMsg == bootFileErrorMsg { + log.Error("启动文件错误!createInferenceJobUserImage failed(%d):%s(%s)", res.StatusCode(), temp.ErrorCode, temp.ErrorMsg) + return &result, fmt.Errorf("启动文件错误!") + } + if temp.ErrorMsg == dataSetErrorMsg { + log.Error("数据集错误!createInferenceJobUserImage failed(%d):%s(%s)", res.StatusCode(), temp.ErrorCode, temp.ErrorMsg) + return &result, fmt.Errorf("数据集错误!") + } + if res.StatusCode() == http.StatusBadGateway { + return &result, fmt.Errorf(UnknownErrorPrefix+"createInferenceJobUserImage failed(%d):%s(%s)", res.StatusCode(), temp.ErrorCode, temp.ErrorMsg) + } else { + return &result, fmt.Errorf("createInferenceJobUserImage failed(%d):%s(%s)", res.StatusCode(), temp.ErrorCode, temp.ErrorMsg) + } + } + + if !result.IsSuccess { + log.Error("createInferenceJobUserImage failed(%s): %s", result.ErrorCode, result.ErrorMsg) + return &result, fmt.Errorf("createInferenceJobUserImage failed(%s): %s", result.ErrorCode, result.ErrorMsg) + } + + return &result, nil +} + func createNotebook2(createJobParams models.CreateNotebook2Params) (*models.CreateNotebookResult, error) { checkSetting() client := getRestyClient() diff --git a/modules/setting/setting.go b/modules/setting/setting.go index c6afae05a..88557ee60 100755 --- a/modules/setting/setting.go +++ b/modules/setting/setting.go @@ -76,6 +76,17 @@ type C2NetSqInfos struct { C2NetSqInfo []*C2NetSequenceInfo `json:"sequence"` } +type AiCenterInfo struct { + CenterID string `json:"center_id"` + Name string `json:"name"` + Endpoint string `json:"endpoint"` + StorageProxyServer string `json:"storage_proxy_server"` +} + +type AiCenterInfos struct { + Info []*AiCenterInfo `json:"infos"` +} + type StFlavorInfos struct { FlavorInfo []*FlavorInfo `json:"flavor_info"` } @@ -594,9 +605,13 @@ var ( SpecialPools string C2NetSequence string SyncScriptProject string + LocalCenterID string + AiCenterInfo string }{} - C2NetInfos *C2NetSqInfos + C2NetInfos *C2NetSqInfos + CenterInfos *AiCenterInfos + C2NetMapInfo map[string]*C2NetSequenceInfo //elk config ElkUrl string @@ -622,6 +637,13 @@ var ( DeductTaskRange time.Duration DeductTaskRangeForFirst time.Duration + //badge config + BadgeIconMaxFileSize int64 + BadgeIconMaxWidth int + BadgeIconMaxHeight int + BadgeIconDefaultSize uint + IconUploadPath string + //wechat auto reply config UserNameOfWechatReply string RepoNameOfWechatReply string @@ -694,8 +716,12 @@ var ( GPU_PYTORCH_IMAGE string GpuQueue string GPU_TENSORFLOW_IMAGE string + GPU_PADDLE_IMAGE string + GPU_MXNET_IMAGE string NPU_MINDSPORE_16_IMAGE string PytorchOnnxBootFile string + PaddleOnnxBootFile string + MXnetOnnxBootFile string PytorchTrTBootFile string MindsporeBootFile string TensorFlowNpuBootFile string @@ -718,6 +744,9 @@ var ( GPUBaseDataSetUUID string GPUCombatDataSetName string GPUCombatDataSetUUID string + + HOST string + KEY string }{} ModelApp = struct { @@ -1531,6 +1560,14 @@ func NewContext() { CloudBrainPayInterval = sec.Key("CLOUDBRAIN_PAY_INTERVAL").MustDuration(60 * time.Minute) DeductTaskRange = sec.Key("DEDUCT_TASK_RANGE").MustDuration(30 * time.Minute) DeductTaskRangeForFirst = sec.Key("DEDUCT_TASK_RANGE_FOR_FIRST").MustDuration(3 * time.Hour) + + sec = Cfg.Section("icons") + BadgeIconMaxFileSize = sec.Key("BADGE_ICON_MAX_FILE_SIZE").MustInt64(1048576) + BadgeIconMaxWidth = sec.Key("BADGE_ICON_MAX_WIDTH").MustInt(4096) + BadgeIconMaxHeight = sec.Key("BADGE_ICON_MAX_HEIGHT").MustInt(3072) + BadgeIconDefaultSize = sec.Key("BADGE_ICON_DEFAULT_SIZE").MustUint(200) + IconUploadPath = 
sec.Key("ICON_UPLOAD_PATH").MustString(path.Join(AppDataPath, "icons")) + SetRadarMapConfig() sec = Cfg.Section("warn_mail") @@ -1557,6 +1594,8 @@ func getModelSafetyConfig() { ModelSafetyTest.NPUBaseDataSetUUID = sec.Key("NPUBaseDataSetUUID").MustString("") ModelSafetyTest.NPUCombatDataSetName = sec.Key("NPUCombatDataSetName").MustString("") ModelSafetyTest.NPUCombatDataSetUUID = sec.Key("NPUCombatDataSetUUID").MustString("") + ModelSafetyTest.HOST = sec.Key("HOST").MustString("") + ModelSafetyTest.KEY = sec.Key("KEY").MustString("") } func getModelConvertConfig() { @@ -1576,6 +1615,10 @@ func getModelConvertConfig() { ModelConvert.NPU_PoolID = sec.Key("NPU_PoolID").MustString("pool7908321a") ModelConvert.NPU_MINDSPORE_IMAGE_ID = sec.Key("NPU_MINDSPORE_IMAGE_ID").MustInt(121) ModelConvert.NPU_TENSORFLOW_IMAGE_ID = sec.Key("NPU_TENSORFLOW_IMAGE_ID").MustInt(35) + ModelConvert.GPU_PADDLE_IMAGE = sec.Key("GPU_PADDLE_IMAGE").MustString("dockerhub.pcl.ac.cn:5000/user-images/openi:paddle2.3.0_gpu_cuda11.2_cudnn8") + ModelConvert.GPU_MXNET_IMAGE = sec.Key("GPU_MXNET_IMAGE").MustString("dockerhub.pcl.ac.cn:5000/user-images/openi:mxnet191cu_cuda102_py37") + ModelConvert.PaddleOnnxBootFile = sec.Key("PaddleOnnxBootFile").MustString("convert_paddle.py") + ModelConvert.MXnetOnnxBootFile = sec.Key("MXnetOnnxBootFile").MustString("convert_mxnet.py") } func getModelAppConfig() { @@ -1612,8 +1655,19 @@ func getGrampusConfig() { if err := json.Unmarshal([]byte(Grampus.C2NetSequence), &C2NetInfos); err != nil { log.Error("Unmarshal(C2NetSequence) failed:%v", err) } + C2NetMapInfo=make(map[string]*C2NetSequenceInfo) + for _,value :=range C2NetInfos.C2NetSqInfo{ + C2NetMapInfo[value.Name]=value + } } Grampus.SyncScriptProject = sec.Key("SYNC_SCRIPT_PROJECT").MustString("script_for_grampus") + Grampus.LocalCenterID = sec.Key("LOCAL_CENTER_ID").MustString("cloudbrain2") + Grampus.AiCenterInfo = sec.Key("AI_CENTER_INFO").MustString("") + if Grampus.AiCenterInfo != "" { + if err := json.Unmarshal([]byte(Grampus.AiCenterInfo), &CenterInfos); err != nil { + log.Error("Unmarshal(AiCenterInfo) failed:%v", err) + } + } } diff --git a/modules/storage/obs.go b/modules/storage/obs.go index 2cb3af927..57ef63029 100755 --- a/modules/storage/obs.go +++ b/modules/storage/obs.go @@ -367,51 +367,58 @@ func GetOneLevelAllObjectUnderDir(bucket string, prefixRootPath string, relative if !strings.HasSuffix(input.Prefix, "/") { input.Prefix += "/" } - output, err := ObsCli.ListObjects(input) fileInfos := make([]FileInfo, 0) prefixLen := len(input.Prefix) fileMap := make(map[string]bool, 0) - if err == nil { - for _, val := range output.Contents { - log.Info("val key=" + val.Key) - var isDir bool - var fileName string - if val.Key == input.Prefix { - continue - } - fileName = val.Key[prefixLen:] - log.Info("fileName =" + fileName) - files := strings.Split(fileName, "/") - if fileMap[files[0]] { - continue - } else { - fileMap[files[0]] = true + index := 1 + for { + output, err := ObsCli.ListObjects(input) + if err == nil { + log.Info("Page:%d\n", index) + index++ + for _, val := range output.Contents { + var isDir bool + var fileName string + if val.Key == input.Prefix { + continue + } + fileName = val.Key[prefixLen:] + files := strings.Split(fileName, "/") + if fileMap[files[0]] { + continue + } else { + fileMap[files[0]] = true + } + ParenDir := relativePath + fileName = files[0] + if len(files) > 1 { + isDir = true + ParenDir += fileName + "/" + } else { + isDir = false + } + fileInfo := FileInfo{ + ModTime: 
val.LastModified.Local().Format("2006-01-02 15:04:05"), + FileName: fileName, + Size: val.Size, + IsDir: isDir, + ParenDir: ParenDir, + } + fileInfos = append(fileInfos, fileInfo) } - ParenDir := relativePath - fileName = files[0] - if len(files) > 1 { - isDir = true - ParenDir += fileName + "/" + if output.IsTruncated { + input.Marker = output.NextMarker } else { - isDir = false + break } - fileInfo := FileInfo{ - ModTime: val.LastModified.Local().Format("2006-01-02 15:04:05"), - FileName: fileName, - Size: val.Size, - IsDir: isDir, - ParenDir: ParenDir, + } else { + if obsError, ok := err.(obs.ObsError); ok { + log.Error("Code:%s, Message:%s", obsError.Code, obsError.Message) } - fileInfos = append(fileInfos, fileInfo) - } - return fileInfos, err - } else { - if obsError, ok := err.(obs.ObsError); ok { - log.Error("Code:%s, Message:%s", obsError.Code, obsError.Message) + return nil, err } - return nil, err } - + return fileInfos, nil } func GetAllObjectByBucketAndPrefix(bucket string, prefix string) ([]FileInfo, error) { @@ -470,47 +477,43 @@ func GetObsListObject(jobName, outPutPath, parentDir, versionName string) ([]Fil input := &obs.ListObjectsInput{} input.Bucket = setting.Bucket input.Prefix = strings.TrimPrefix(path.Join(setting.TrainJobModelPath, jobName, outPutPath, versionName, parentDir), "/") - log.Info("bucket=" + input.Bucket + " Prefix=" + input.Prefix) - strPrefix := strings.Split(input.Prefix, "/") + if !strings.HasSuffix(input.Prefix, "/") { + input.Prefix += "/" + } output, err := ObsCli.ListObjects(input) fileInfos := make([]FileInfo, 0) + prefixLen := len(input.Prefix) + fileMap := make(map[string]bool, 0) if err == nil { for _, val := range output.Contents { - str1 := strings.Split(val.Key, "/") + log.Info("val key=" + val.Key) var isDir bool - var fileName, nextParentDir string - if strings.HasSuffix(val.Key, "/") { - //dirs in next level dir - if len(str1)-len(strPrefix) > 2 { - continue - } - fileName = str1[len(str1)-2] + var fileName string + if val.Key == input.Prefix { + continue + } + fileName = val.Key[prefixLen:] + log.Info("fileName =" + fileName) + files := strings.Split(fileName, "/") + if fileMap[files[0]] { + continue + } else { + fileMap[files[0]] = true + } + ParenDir := parentDir + fileName = files[0] + if len(files) > 1 { isDir = true - if parentDir == "" { - nextParentDir = fileName - } else { - nextParentDir = parentDir + "/" + fileName - } - - if fileName == strPrefix[len(strPrefix)-1] || (fileName+"/") == outPutPath { - continue - } + ParenDir += fileName + "/" } else { - //files in next level dir - if len(str1)-len(strPrefix) > 1 { - continue - } - fileName = str1[len(str1)-1] isDir = false - nextParentDir = parentDir } - fileInfo := FileInfo{ ModTime: val.LastModified.Local().Format("2006-01-02 15:04:05"), FileName: fileName, Size: val.Size, IsDir: isDir, - ParenDir: nextParentDir, + ParenDir: ParenDir, } fileInfos = append(fileInfos, fileInfo) } diff --git a/modules/structs/org.go b/modules/structs/org.go index 4b79a4e70..191843f87 100644 --- a/modules/structs/org.go +++ b/modules/structs/org.go @@ -15,6 +15,7 @@ type Organization struct { Location string `json:"location"` Visibility string `json:"visibility"` RepoAdminChangeTeamAccess bool `json:"repo_admin_change_team_access"` + NumRepos int `json:"num_repos"` } // CreateOrgOption options for creating an organization diff --git a/modules/structs/repo.go b/modules/structs/repo.go index 6e9ece4b0..03741d03b 100755 --- a/modules/structs/repo.go +++ b/modules/structs/repo.go @@ -90,6 +90,7 @@ 
 type Repository struct {
 	AllowRebaseMerge bool   `json:"allow_rebase_explicit"`
 	AllowSquash      bool   `json:"allow_squash_merge"`
 	AvatarURL        string `json:"avatar_url"`
+	Status           int    `json:"status"`
 }
 
 // CreateRepoOption options when creating repository
diff --git a/modules/urfs_client/config/constants.go b/modules/urfs_client/config/constants.go
new file mode 100755
index 000000000..76bdc5eab
--- /dev/null
+++ b/modules/urfs_client/config/constants.go
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2020 The Dragonfly Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package config
+
+import (
+	"time"
+)
+
+// Reasons for backing to source.
+const (
+	BackSourceReasonNone          = 0
+	BackSourceReasonRegisterFail  = 1
+	BackSourceReasonMd5NotMatch   = 2
+	BackSourceReasonDownloadError = 3
+	BackSourceReasonNoSpace       = 4
+	BackSourceReasonInitError     = 5
+	BackSourceReasonWriteError    = 6
+	BackSourceReasonHostSysError  = 7
+	BackSourceReasonNodeEmpty     = 8
+	BackSourceReasonSourceError   = 10
+	BackSourceReasonUserSpecified = 100
+	ForceNotBackSourceAddition    = 1000
+)
+
+// Download pattern.
+const (
+	PatternP2P      = "p2p"
+	PatternSeedPeer = "seed-peer"
+	PatternSource   = "source"
+)
+
+//// Download limit.
+//const (
+//	DefaultPerPeerDownloadLimit = 20 * unit.MB
+//	DefaultTotalDownloadLimit   = 100 * unit.MB
+//	DefaultUploadLimit          = 100 * unit.MB
+//	DefaultMinRate              = 20 * unit.MB
+//)
+
+// Others.
+const (
+	DefaultTimestampFormat = "2006-01-02 15:04:05"
+	SchemaHTTP             = "http"
+
+	DefaultTaskExpireTime  = 6 * time.Hour
+	DefaultGCInterval      = 1 * time.Minute
+	DefaultDaemonAliveTime = 5 * time.Minute
+	DefaultScheduleTimeout = 5 * time.Minute
+	DefaultDownloadTimeout = 5 * time.Minute
+
+	DefaultSchedulerSchema = "http"
+	DefaultSchedulerIP     = "127.0.0.1"
+	DefaultSchedulerPort   = 8002
+
+	DefaultPieceChanSize     = 16
+	DefaultObjectMaxReplicas = 3
+)
+
+// Dfcache subcommand names.
+const (
+	CmdStat   = "stat"
+	CmdImport = "import"
+	CmdExport = "export"
+	CmdDelete = "delete"
+)
+
+// Default service ports for listening.
+const (
+	DefaultEndPort                = 65535
+	DefaultPeerStartPort          = 65000
+	DefaultUploadStartPort        = 65002
+	DefaultObjectStorageStartPort = 65004
+	DefaultHealthyStartPort       = 40901
+)
+
+var (
+	// DefaultCertValidityPeriod is the default validity period of a certificate.
+	DefaultCertValidityPeriod = 180 * 24 * time.Hour
+)
diff --git a/modules/urfs_client/config/dfstore.go b/modules/urfs_client/config/dfstore.go
new file mode 100755
index 000000000..aafb1b33c
--- /dev/null
+++ b/modules/urfs_client/config/dfstore.go
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2022 The Dragonfly Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package config
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+)
+
+type DfstoreConfig struct {
+	// Address of the object storage service.
+	Endpoint string `yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+	// Filter is used to generate a unique Task ID by
+	// filtering unnecessary query params in the URL;
+	// values are separated by the & character.
+	Filter string `yaml:"filter,omitempty" mapstructure:"filter,omitempty"`
+
+	// Mode is the mode in which the backend is written,
+	// including WriteBack and AsyncWriteBack.
+	Mode int `yaml:"mode,omitempty" mapstructure:"mode,omitempty"`
+
+	// MaxReplicas is the maximum number of
+	// replicas of an object cache in seed peers.
+	MaxReplicas int `yaml:"maxReplicas,omitempty" mapstructure:"maxReplicas,omitempty"`
+}
+
+// NewDfstore returns the default dfstore configuration.
+func NewDfstore() *DfstoreConfig {
+	u := url.URL{
+		Scheme: "http",
+		Host:   fmt.Sprintf("%s:%d", "127.0.0.1", DefaultObjectStorageStartPort),
+	}
+
+	return &DfstoreConfig{
+		Endpoint:    u.String(),
+		MaxReplicas: DefaultObjectMaxReplicas,
+	}
+}
+
+func (cfg *DfstoreConfig) Validate() error {
+	if cfg.Endpoint == "" {
+		return errors.New("dfstore requires parameter endpoint")
+	}
+
+	if _, err := url.ParseRequestURI(cfg.Endpoint); err != nil {
+		return fmt.Errorf("invalid endpoint: %w", err)
+	}
+
+	return nil
+}
diff --git a/modules/urfs_client/config/headers.go b/modules/urfs_client/config/headers.go
new file mode 100755
index 000000000..9a27296d3
--- /dev/null
+++ b/modules/urfs_client/config/headers.go
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2020 The Dragonfly Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package config
+
+const (
+	HeaderDragonflyFilter = "X-Dragonfly-Filter"
+	HeaderDragonflyPeer   = "X-Dragonfly-Peer"
+	HeaderDragonflyTask   = "X-Dragonfly-Task"
+	HeaderDragonflyRange  = "X-Dragonfly-Range"
+	// HeaderDragonflyTag: requests carrying different tags for the same URL are divided into different P2P overlays.
+	HeaderDragonflyTag = "X-Dragonfly-Tag"
+	// HeaderDragonflyApplication is used for statistics and traffic control.
+	HeaderDragonflyApplication = "X-Dragonfly-Application"
+	// HeaderDragonflyRegistry is used for dynamic registry mirrors.
+	HeaderDragonflyRegistry = "X-Dragonfly-Registry"
+	// HeaderDragonflyObjectMetaDigest is used for the digest of object storage.
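+	// (read back from metadata responses and surfaced as ObjectMetadata.Digest)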
+	HeaderDragonflyObjectMetaDigest = "X-Dragonfly-Object-Meta-Digest"
+)
diff --git a/modules/urfs_client/dfstore/dfstore.go b/modules/urfs_client/dfstore/dfstore.go
new file mode 100755
index 000000000..e515d2bad
--- /dev/null
+++ b/modules/urfs_client/dfstore/dfstore.go
@@ -0,0 +1,307 @@
+package dfstore
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"strconv"
+
+	"github.com/go-http-utils/headers"
+
+	"code.gitea.io/gitea/modules/urfs_client/config"
+	pkgobjectstorage "code.gitea.io/gitea/modules/urfs_client/objectstorage"
+)
+
+// Dfstore is the interface used for object storage.
+type Dfstore interface {
+
+	// GetUrfsMetadataRequestWithContext returns *http.Request of getting Urfs metadata.
+	GetUrfsMetadataRequestWithContext(ctx context.Context, input *GetUrfsMetadataInput) (*http.Request, error)
+
+	// GetUrfsMetadataWithContext returns metadata of Urfs.
+	GetUrfsMetadataWithContext(ctx context.Context, input *GetUrfsMetadataInput) (*pkgobjectstorage.ObjectMetadata, error)
+
+	// GetUrfsRequestWithContext returns *http.Request of getting Urfs.
+	GetUrfsRequestWithContext(ctx context.Context, input *GetUrfsInput) (*http.Request, error)
+
+	// GetUrfsWithContext returns data of Urfs.
+	GetUrfsWithContext(ctx context.Context, input *GetUrfsInput) (io.ReadCloser, error)
+
+	// GetUrfsStatusRequestWithContext returns *http.Request of getting Urfs status.
+	GetUrfsStatusRequestWithContext(ctx context.Context, input *GetUrfsInput) (*http.Request, error)
+
+	// GetUrfsStatusWithContext returns schedule status of Urfs.
+	GetUrfsStatusWithContext(ctx context.Context, input *GetUrfsInput) (io.ReadCloser, error)
+}
+
+// dfstore provides the object storage functions.
+type dfstore struct {
+	endpoint   string
+	httpClient *http.Client
+}
+
+// Option is a functional option for configuring the dfstore.
+type Option func(dfs *dfstore)
+
+// New dfstore instance.
+func New(endpoint string, options ...Option) Dfstore {
+	dfs := &dfstore{
+		endpoint:   endpoint,
+		httpClient: http.DefaultClient,
+	}
+
+	for _, opt := range options {
+		opt(dfs)
+	}
+
+	return dfs
+}
+
+// GetUrfsMetadataInput is used to construct request of getting object metadata.
+type GetUrfsMetadataInput struct {
+
+	// Endpoint is endpoint name.
+	Endpoint string
+
+	// BucketName is bucket name.
+	BucketName string
+
+	// ObjectKey is object key.
+	ObjectKey string
+
+	// DstPeer is target peerHost.
+	DstPeer string
+}
+
+// Validate validates GetUrfsMetadataInput fields.
+func (i *GetUrfsMetadataInput) Validate() error {
+	if i.Endpoint == "" {
+		return errors.New("invalid Endpoint")
+	}
+
+	if i.BucketName == "" {
+		return errors.New("invalid BucketName")
+	}
+
+	if i.ObjectKey == "" {
+		return errors.New("invalid ObjectKey")
+	}
+
+	return nil
+}
+
+// GetUrfsMetadataRequestWithContext returns *http.Request of getting Urfs metadata.
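+// The request targets the object-storage service port (DefaultObjectStorageStartPort) on the destination peer.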
+func (dfs *dfstore) GetUrfsMetadataRequestWithContext(ctx context.Context, input *GetUrfsMetadataInput) (*http.Request, error) {
+	if err := input.Validate(); err != nil {
+		return nil, err
+	}
+
+	dstUrl := url.URL{
+		Scheme: "http",
+		Host:   fmt.Sprintf("%s:%d", input.DstPeer, config.DefaultObjectStorageStartPort),
+	}
+
+	u, err := url.Parse(dstUrl.String())
+	if err != nil {
+		return nil, err
+	}
+
+	u.Path = path.Join("buckets", input.BucketName+"."+input.Endpoint, "objects", input.ObjectKey)
+	req, err := http.NewRequestWithContext(ctx, http.MethodHead, u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return req, nil
+}
+
+// GetUrfsMetadataWithContext returns metadata of the object.
+func (dfs *dfstore) GetUrfsMetadataWithContext(ctx context.Context, input *GetUrfsMetadataInput) (*pkgobjectstorage.ObjectMetadata, error) {
+	req, err := dfs.GetUrfsMetadataRequestWithContext(ctx, input)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := dfs.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode/100 != 2 {
+		return nil, fmt.Errorf("bad response status %s", resp.Status)
+	}
+
+	contentLength, err := strconv.ParseInt(resp.Header.Get(headers.ContentLength), 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return &pkgobjectstorage.ObjectMetadata{
+		ContentDisposition: resp.Header.Get(headers.ContentDisposition),
+		ContentEncoding:    resp.Header.Get(headers.ContentEncoding),
+		ContentLanguage:    resp.Header.Get(headers.ContentLanguage),
+		ContentLength:      contentLength,
+		ContentType:        resp.Header.Get(headers.ContentType),
+		ETag:               resp.Header.Get(headers.ETag),
+		Digest:             resp.Header.Get(config.HeaderDragonflyObjectMetaDigest),
+	}, nil
+}
+
+// GetUrfsInput is used to construct request of getting object.
+type GetUrfsInput struct {
+
+	// Endpoint is endpoint name.
+	Endpoint string
+
+	// BucketName is bucket name.
+	BucketName string
+
+	// ObjectKey is object key.
+	ObjectKey string
+
+	// Filter is used to generate a unique Task ID by
+	// filtering unnecessary query params in the URL;
+	// values are separated by the & character.
+	Filter string
+
+	// Range is the HTTP range header.
+	Range string
+
+	// DstPeer is target peerHost.
+	DstPeer string
+}
+
+// GetUrfsWithContext returns data of the object.
+func (dfs *dfstore) GetUrfsWithContext(ctx context.Context, input *GetUrfsInput) (io.ReadCloser, error) {
+	req, err := dfs.GetUrfsRequestWithContext(ctx, input)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := dfs.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode/100 != 2 {
+		return nil, fmt.Errorf("bad response status %s", resp.Status)
+	}
+
+	return resp.Body, nil
+}
+
+// GetUrfsRequestWithContext returns *http.Request of getting the object.
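+// Note that this is a POST to the peer's cache_object endpoint: it asks the peer to schedule/cache the object rather than stream it back directly.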
+func (dfs *dfstore) GetUrfsRequestWithContext(ctx context.Context, input *GetUrfsInput) (*http.Request, error) { + if err := input.Validate(); err != nil { + return nil, err + } + + dstUrl := url.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s:%d", input.DstPeer, config.DefaultObjectStorageStartPort), + } + + u, err := url.Parse(dstUrl.String()) + if err != nil { + return nil, err + } + + u.Path = path.Join("buckets", input.BucketName+"."+input.Endpoint, "cache_object", input.ObjectKey) + + query := u.Query() + if input.Filter != "" { + query.Set("filter", input.Filter) + } + u.RawQuery = query.Encode() + req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), nil) + if err != nil { + return nil, err + } + + if input.Range != "" { + req.Header.Set(headers.Range, input.Range) + } + + return req, nil +} + +// Validate validates GetUrfsInput fields. +func (i *GetUrfsInput) Validate() error { + + if i.Endpoint == "" { + return errors.New("invalid Endpoint") + + } + + if i.BucketName == "" { + return errors.New("invalid BucketName") + + } + + if i.ObjectKey == "" { + return errors.New("invalid ObjectKey") + } + + return nil +} + +// GetUrfsStatusWithContext returns schedule task status. +func (dfs *dfstore) GetUrfsStatusWithContext(ctx context.Context, input *GetUrfsInput) (io.ReadCloser, error) { + req, err := dfs.GetUrfsStatusRequestWithContext(ctx, input) + if err != nil { + return nil, err + } + + resp, err := dfs.httpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode/100 != 2 { + return nil, fmt.Errorf("bad response status %s", resp.Status) + } + + return resp.Body, nil +} + +// GetObjectStatusRequestWithContext returns *http.Request of check schedule task status. +func (dfs *dfstore) GetUrfsStatusRequestWithContext(ctx context.Context, input *GetUrfsInput) (*http.Request, error) { + if err := input.Validate(); err != nil { + return nil, err + } + + dstUrl := url.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s:%d", input.DstPeer, config.DefaultObjectStorageStartPort), + } + + u, err := url.Parse(dstUrl.String()) + if err != nil { + return nil, err + } + + u.Path = path.Join("buckets", input.BucketName+"."+input.Endpoint, "check_object", input.ObjectKey) + + query := u.Query() + if input.Filter != "" { + query.Set("filter", input.Filter) + } + u.RawQuery = query.Encode() + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + if input.Range != "" { + req.Header.Set(headers.Range, input.Range) + } + + return req, nil +} diff --git a/modules/urfs_client/objectstorage/mocks/objectstorage_mock.go b/modules/urfs_client/objectstorage/mocks/objectstorage_mock.go new file mode 100644 index 000000000..baa34f437 --- /dev/null +++ b/modules/urfs_client/objectstorage/mocks/objectstorage_mock.go @@ -0,0 +1,5 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: objectstorage.go + +// Package mocks is a generated GoMock package. +package mocks diff --git a/modules/urfs_client/objectstorage/objectstorage.go b/modules/urfs_client/objectstorage/objectstorage.go new file mode 100755 index 000000000..e81356760 --- /dev/null +++ b/modules/urfs_client/objectstorage/objectstorage.go @@ -0,0 +1,47 @@ +/* + * Copyright 2022 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//go:generate mockgen -destination mocks/objectstorage_mock.go -source objectstorage.go -package mocks
+
+package objectstorage
+
+type ObjectMetadata struct {
+	// Key is object key.
+	Key string
+
+	// ContentDisposition is Content-Disposition header.
+	ContentDisposition string
+
+	// ContentEncoding is Content-Encoding header.
+	ContentEncoding string
+
+	// ContentLanguage is Content-Language header.
+	ContentLanguage string
+
+	// ContentLength is Content-Length header.
+	ContentLength int64
+
+	// ContentType is Content-Type header.
+	ContentType string
+
+	// ETag is ETag header.
+	ETag string
+
+	// Digest is object digest.
+	Digest string
+}
diff --git a/modules/urfs_client/urchin/schedule.go b/modules/urfs_client/urchin/schedule.go
new file mode 100755
index 000000000..73ea7b39d
--- /dev/null
+++ b/modules/urfs_client/urchin/schedule.go
@@ -0,0 +1,112 @@
+package urchin
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/labelmsg"
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/setting"
+)
+
+type DecompressReq struct {
+	SourceFile string `json:"source_file"`
+	DestPath   string `json:"dest_path"`
+}
+
+var urfsClient Urchinfs
+
+func getUrfsClient() {
+	if urfsClient != nil {
+		return
+	}
+
+	urfsClient = New()
+}
+
+func GetBackNpuModel(cloudbrainID int64, endpoint, bucket, objectKey, destPeerHost string) error {
+	getUrfsClient()
+	res, err := urfsClient.ScheduleDataToPeerByKey(endpoint, bucket, objectKey, destPeerHost)
+	if err != nil {
+		log.Error("ScheduleDataToPeerByKey failed:%v", err)
+		return err
+	}
+
+	_, err = models.InsertScheduleRecord(&models.ScheduleRecord{
+		CloudbrainID: cloudbrainID,
+		EndPoint:     res.DataEndpoint,
+		Bucket:       res.DataRoot,
+		ObjectKey:    res.DataPath,
+		ProxyServer:  destPeerHost,
+		Status:       res.StatusCode,
+	})
+	if err != nil {
+		log.Error("InsertScheduleRecord failed:%v", err)
+		return err
+	}
+
+	switch res.StatusCode {
+	case models.StorageScheduleSucceed:
+		log.Info("ScheduleDataToPeerByKey succeed")
+		decompress(res.DataRoot+"/"+res.DataPath, setting.Bucket+"/"+strings.TrimSuffix(res.DataPath, models.ModelSuffix))
+	case models.StorageScheduleProcessing:
+		log.Info("ScheduleDataToPeerByKey processing")
+	case models.StorageScheduleFailed:
+		log.Error("ScheduleDataToPeerByKey failed:%s", res.StatusMsg)
+		return fmt.Errorf("GetBackNpuModel failed:%s", res.StatusMsg)
+	default:
+		log.Info("ScheduleDataToPeerByKey failed, unknown StatusCode:%d", res.StatusCode)
+		return fmt.Errorf("GetBackNpuModel failed, unknown StatusCode:%d", res.StatusCode)
+	}
+
+	return nil
+}
+
+func HandleScheduleRecords() error {
+	getUrfsClient()
+	records, err := models.GetSchedulingRecord()
+	if err != nil {
+		log.Error("GetSchedulingRecord failed:%v", err)
+		return err
+	}
+
+	for _, record := range records {
+		res, err := urfsClient.CheckScheduleTaskStatusByKey(record.EndPoint, record.Bucket, record.ObjectKey, record.ProxyServer)
+		if err != nil {
+			log.Error("CheckScheduleTaskStatusByKey(%d) failed:%v", record.ID, err)
+			continue
+		}
+
+		record.Status =
res.StatusCode + models.UpdateScheduleCols(record, "status") + + switch res.StatusCode { + case models.StorageScheduleSucceed: + log.Info("ScheduleDataToPeerByKey(%s) succeed", record.ObjectKey) + decompress(record.Bucket+"/"+record.ObjectKey, setting.Bucket+"/"+strings.TrimSuffix(record.ObjectKey, models.ModelSuffix)) + case models.StorageScheduleProcessing: + log.Info("ScheduleDataToPeerByKey(%s) processing", record.ObjectKey) + case models.StorageScheduleFailed: + log.Error("ScheduleDataToPeerByKey(%s) failed:%s", record.ObjectKey, res.StatusMsg) + + default: + log.Info("ScheduleDataToPeerByKey(%s) failed, unknown StatusCode:%d", record.ObjectKey, res.StatusCode) + } + + } + + return nil +} + +func decompress(sourceFile, destPath string) { + req, _ := json.Marshal(DecompressReq{ + SourceFile: sourceFile, + DestPath: destPath, + }) + err := labelmsg.SendDecompressAttachToLabelOBS(string(req)) + if err != nil { + log.Error("SendDecompressTask to labelsystem (%s) failed:%s", sourceFile, err.Error()) + } +} diff --git a/modules/urfs_client/urchin/urchinfs.go b/modules/urfs_client/urchin/urchinfs.go new file mode 100755 index 000000000..ae81e4e98 --- /dev/null +++ b/modules/urfs_client/urchin/urchinfs.go @@ -0,0 +1,276 @@ +package urchin + +import ( + "context" + "encoding/json" + "errors" + "io/ioutil" + "net/url" + "strconv" + "strings" + + "code.gitea.io/gitea/modules/urfs_client/config" + urfs "code.gitea.io/gitea/modules/urfs_client/dfstore" +) + +type Urchinfs interface { + + //// schedule source dataset to target peer + //ScheduleDataToPeer(sourceUrl, destPeerHost string) (*PeerResult, error) + // + //// check schedule data to peer task status + //CheckScheduleTaskStatus(sourceUrl, destPeerHost string) (*PeerResult, error) + + ScheduleDataToPeerByKey(endpoint, bucketName, objectKey, destPeerHost string) (*PeerResult, error) + + CheckScheduleTaskStatusByKey(endpoint, bucketName, objectKey, destPeerHost string) (*PeerResult, error) +} + +type urchinfs struct { + // Initialize default urfs config. + cfg *config.DfstoreConfig +} + +// New urchinfs instance. +func New() Urchinfs { + + urfs := &urchinfs{ + cfg: config.NewDfstore(), + } + return urfs +} + +const ( + // UrfsScheme if the scheme of object storage. + UrfsScheme = "urfs" +) + +/* +func (urfs *urchinfs) ScheduleDataToPeer(sourceUrl, destPeerHost string) (*PeerResult, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if err := urfs.cfg.Validate(); err != nil { + return nil, err + } + + if err := validateSchedulelArgs(sourceUrl, destPeerHost); err != nil { + return nil, err + } + + // Copy object storage to local file. 
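+	// (URL-based variant kept commented out for reference; the *ByKey functions below are the active entry points)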
+ endpoint, bucketName, objectKey, err := parseUrfsURL(sourceUrl) + if err != nil { + return nil, err + } + peerResult, err := processScheduleDataToPeer(ctx, urfs.cfg, endpoint, bucketName, objectKey, destPeerHost) + if err != nil { + return nil, err + } + + return peerResult, err +} + +*/ + +func (urfs *urchinfs) ScheduleDataToPeerByKey(endpoint, bucketName, objectKey, destPeerHost string) (*PeerResult, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + peerResult, err := processScheduleDataToPeer(ctx, urfs.cfg, endpoint, bucketName, objectKey, destPeerHost) + if err != nil { + return nil, err + } + + return peerResult, err +} + +/* +func (urfs *urchinfs) CheckScheduleTaskStatus(sourceUrl, destPeerHost string) (*PeerResult, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if err := urfs.cfg.Validate(); err != nil { + return nil, err + } + + if err := validateSchedulelArgs(sourceUrl, destPeerHost); err != nil { + return nil, err + } + + // Copy object storage to local file. + endpoint, bucketName, objectKey, err := parseUrfsURL(sourceUrl) + if err != nil { + return nil, err + } + peerResult, err := processCheckScheduleTaskStatus(ctx, urfs.cfg, endpoint, bucketName, objectKey, destPeerHost) + if err != nil { + return nil, err + } + + return peerResult, err +} + +*/ + +func (urfs *urchinfs) CheckScheduleTaskStatusByKey(endpoint, bucketName, objectKey, destPeerHost string) (*PeerResult, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + peerResult, err := processCheckScheduleTaskStatus(ctx, urfs.cfg, endpoint, bucketName, objectKey, destPeerHost) + if err != nil { + return nil, err + } + + return peerResult, err +} + +// isUrfsURL determines whether the raw url is urfs url. +func isUrfsURL(rawURL string) bool { + u, err := url.ParseRequestURI(rawURL) + if err != nil { + return false + } + + if u.Scheme != UrfsScheme || u.Host == "" || u.Path == "" { + return false + } + + return true +} + +// Validate copy arguments. +func validateSchedulelArgs(sourceUrl, destPeer string) error { + if !isUrfsURL(sourceUrl) { + return errors.New("source url should be urfs:// protocol") + } + + return nil +} + +/* +// Parse object storage url. eg: urfs://源数据$endpoint/源数据$bucket/源数据filepath +func parseUrfsURL(rawURL string) (string, string, string, error) { + u, err := url.ParseRequestURI(rawURL) + if err != nil { + return "", "", "", err + } + + if u.Scheme != UrfsScheme { + return "", "", "", fmt.Errorf("invalid scheme, e.g. %s://endpoint/bucket_name/object_key", UrfsScheme) + } + + if u.Host == "" { + return "", "", "", errors.New("empty endpoint name") + } + + if u.Path == "" { + return "", "", "", errors.New("empty object path") + } + + bucket, key, found := strings.Cut(strings.Trim(u.Path, "/"), "/") + if found == false { + return "", "", "", errors.New("invalid bucket and object key " + u.Path) + } + + return u.Host, bucket, key, nil +} + +*/ + +// Schedule object storage to peer. 
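+// It reads the object's metadata from the peer, asks the peer to cache the object, then cross-checks the reported content length against that metadata.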
+func processScheduleDataToPeer(ctx context.Context, cfg *config.DfstoreConfig, endpoint, bucketName, objectKey, dstPeer string) (*PeerResult, error) {
+	dfs := urfs.New(cfg.Endpoint)
+	meta, err := dfs.GetUrfsMetadataWithContext(ctx, &urfs.GetUrfsMetadataInput{
+		Endpoint:   endpoint,
+		BucketName: bucketName,
+		ObjectKey:  objectKey,
+		DstPeer:    dstPeer,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	reader, err := dfs.GetUrfsWithContext(ctx, &urfs.GetUrfsInput{
+		Endpoint:   endpoint,
+		BucketName: bucketName,
+		ObjectKey:  objectKey,
+		DstPeer:    dstPeer,
+	})
+	if err != nil {
+		return nil, err
+	}
+	defer reader.Close()
+
+	body, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+
+	var peerResult PeerResult
+	if err := json.Unmarshal(body, &peerResult); err != nil {
+		return nil, err
+	}
+	peerResult.SignedUrl = strings.ReplaceAll(peerResult.SignedUrl, "\\u0026", "&")
+
+	fileContentLength, err := strconv.ParseInt(peerResult.ContentLength, 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	if fileContentLength != meta.ContentLength {
+		return nil, errors.New("content length inconsistent with meta")
+	}
+
+	return &peerResult, nil
+}
+
+// check schedule task status.
+func processCheckScheduleTaskStatus(ctx context.Context, cfg *config.DfstoreConfig, endpoint, bucketName, objectKey, dstPeer string) (*PeerResult, error) {
+	dfs := urfs.New(cfg.Endpoint)
+	meta, err := dfs.GetUrfsMetadataWithContext(ctx, &urfs.GetUrfsMetadataInput{
+		Endpoint:   endpoint,
+		BucketName: bucketName,
+		ObjectKey:  objectKey,
+		DstPeer:    dstPeer,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	reader, err := dfs.GetUrfsStatusWithContext(ctx, &urfs.GetUrfsInput{
+		Endpoint:   endpoint,
+		BucketName: bucketName,
+		ObjectKey:  objectKey,
+		DstPeer:    dstPeer,
+	})
+	if err != nil {
+		return nil, err
+	}
+	defer reader.Close()
+
+	body, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+
+	var peerResult PeerResult
+	if err := json.Unmarshal(body, &peerResult); err != nil {
+		return nil, err
+	}
+	peerResult.SignedUrl = strings.ReplaceAll(peerResult.SignedUrl, "\\u0026", "&")
+
+	fileContentLength, err := strconv.ParseInt(peerResult.ContentLength, 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	if fileContentLength != meta.ContentLength {
+		return nil, errors.New("content length inconsistent with meta")
+	}
+	return &peerResult, nil
+}
+
+type PeerResult struct {
+	ContentType   string `json:"Content-Type"`
+	ContentLength string `json:"Content-Length"`
+	SignedUrl     string
+	DataRoot      string
+	DataPath      string
+	DataEndpoint  string
+	StatusCode    int
+	StatusMsg     string
+	TaskID        string
+}
diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini
index 773a338c1..955690800 100755
--- a/options/locale/locale_en-US.ini
+++ b/options/locale/locale_en-US.ini
@@ -265,8 +265,8 @@ page_dev_yunlao_desc3=China computing power network (C²NET) phase I can realize
 page_dev_yunlao_desc4=Developers can freely select the corresponding computing resources according to the use needs, and can test the adaptability, performance, stability, etc. of the model in different hardware environments.
 page_dev_yunlao_desc5=If your model requires more computing resources, you can also apply for it separately.
 page_dev_yunlao_apply=Apply Separately
-c2net_title=China Computing Network
-c2net_desc=The artificial intelligence computing power network promotion alliance has access to 11 intelligent computing centers, with a total scale of 1924p.
+c2net_title=China Computing NET(C²NET)
+c2net_desc=Extensive access to intelligent computing centers, supercomputing centers and big data centers across the country to provide users with free computing resources.
c2net_center=Center search=Search search_repo=Repository @@ -289,6 +289,7 @@ provide_resoure = Computing resources of CPU/GPU/NPU are provided freely for var activity = Activity no_events = There are no events related or_t = or +powerdby=Powered_by Pengcheng CloudBrain、China Computing NET(C²NET)、 [explore] repos = Repositories @@ -525,6 +526,7 @@ datasets = Datasets activity = Public Activity followers = Followers starred = Starred Repositories +badge = Achievement Badge following = Following follow = Follow unfollow = Unfollow @@ -1215,7 +1217,8 @@ cloudbrain.benchmark.evaluate_train=Train Script cloudbrain.benchmark.evaluate_test=Test Script cloudbrain.benchmark.types={"type":[{"id":1,"rank_link":"https://git.openi.org.cn/benchmark/?username=admin&algType=detection","first":"Target detection","second":[{"id":1,"value":"None","attachment":"84cf39c4-d8bc-41aa-aaa3-182ce289b105","owner":"yangzhx","repo_name":"detection_benchmark_script"}]},{"id":2,"rank_link":"https://git.openi.org.cn/benchmark/?username=admin&algType=reid","first":"Target re-identification","second":[{"id":1,"value":"Vehicle re-identification","attachment":"84cf39c4-d8bc-41aa-aaa3-182ce289b105","owner":"JiahongXu","repo_name":"benchmark_reID_script"},{"id":2,"value":"Image-based person re-identification","attachment":"84cf39c4-d8bc-41aa-aaa3-182ce289b105","owner":"JiahongXu","repo_name":"benchmark_reID_script"}]},{"id":3,"rank_link":"https://git.openi.org.cn/benchmark/?username=admin&algType=tracking","first":"Multi-target tracking","second":[{"id":1,"value":"None","attachment":"84cf39c4-d8bc-41aa-aaa3-182ce289b105","owner":"lix07","repo_name":"MOT_benchmark_script"}]}]} cloudbrain.morethanonejob=You already have a running or waiting task, create it after that task is over. - +cloudbrain.morethanonejob1=You have created an equivalent task that is waiting or running, please wait for the task to finish before creating it. +cloudbrain.morethanonejob2=You can view all your Cloud Brain tasks in Home > Cloudbrain Task . modelarts.infer_job_model = Model modelarts.infer_job_model_file = Model File @@ -1225,6 +1228,9 @@ modelarts.infer_job.select_model = Select Model modelarts.infer_job.boot_file_helper=The startup file is the entry file for your program execution and must end in.py.Such as inference.py, main.py, example/inference.py, case/main.py. modelarts.infer_job.tooltip = The model has been deleted and cannot be viewed. modelarts.download_log=Download log file +modelarts.log_file = Log file +modelarts.fullscreen_log_file = View in full screen +modelarts.exit_full_screen = Exit fullscreen modelarts.no_node_right = The value of 'Amount of Compute Node' is wrong, you have no right to use the current value of 'Amount of Compute Node'. @@ -3177,7 +3183,7 @@ foot.help = help foot.copyright= Copyright: New Generation Artificial Intelligence Open Source Open Platform (OpenI) Platform_Tutorial = Tutorial foot.advice_feedback = Feedback - +resource_description = Resource Note [cloudbrain] all_resource_cluster=All Cluster all_ai_center=All Computing NET @@ -3216,7 +3222,7 @@ view_sample = View sample inference_output_path_rule = The inference output path is stored in the run parameter result_url. 
model_file_path_rule=The model file location is stored in the run parameter ckpt_url model_file_postfix_rule = The supported format of the model file is [ckpt, pb, h5, json, pkl, pth, t7, pdparams, onnx, pbtxt, keras, mlmodel, cfg, pt] -model_convert_postfix_rule = The supported format of the model file is [.pth, .pkl, .onnx, .mindir, .ckpt, .pb] +model_convert_postfix_rule = The supported format of the model file is [.pth, .pkl, .onnx, .mindir, .ckpt, .pb, .pdmodel, .pdiparams, .params, .json] delete_task = Delete task task_delete_confirm = Are you sure you want to delete this task? Once this task is deleted, it cannot be recovered. operate_confirm = confirm diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index 8ba4d252d..63ff87345 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -267,8 +267,8 @@ page_dev_yunlao_desc3=中国算力网(C²NET)一期可实现不同人工智 page_dev_yunlao_desc4=开发者可以根据使用需求,自由选择相应计算资源,可以测试模型在不同硬件环境下的适配能力、性能、稳定性等。 page_dev_yunlao_desc5=如果您的模型需要更多的计算资源,也可以单独申请。 page_dev_yunlao_apply=单独申请 -c2net_title=智算网络 -c2net_desc=人工智能算力网络推进联盟已接入11家智算中心,算力总规模1924P +c2net_title=中国算力网(C²NET) +c2net_desc=广泛接入全国各地智算中心、超算中心与大数据中心等,为用户提供普惠算力资源 c2net_center=中心 search=搜索 search_repo=项目 @@ -287,11 +287,12 @@ explore_AI = 探索更好的AI,来这里发现更有意思的 datasets = 数据集 repositories = 项目 use_plt__fuction = 使用本平台提供的AI协作功能,如:托管代码、共享数据、调试算法或训练模型,请先 -provide_resoure = 平台目前免费提供CPU、GPU、NPU的算力资源,可进行多种类型的AI任务。 +provide_resoure = 平台目前提供CPU、GPU、NPU的普惠算力资源,可进行多种类型的AI任务。 create_pro = 创建项目 activity = 活动 no_events = 还没有与您相关的活动 or_t = 或 +powerdby=Powered_by 鹏城实验室云脑、中国算力网(C²NET)、 [explore] @@ -530,6 +531,7 @@ datasets=数据集 activity=公开活动 followers=关注者 starred=已点赞 +badge=成就徽章 following=关注中 follow=关注 unfollow=取消关注 @@ -1229,6 +1231,8 @@ cloudbrain.benchmark.evaluate_test=测试程序 cloudbrain.benchmark.types={"type":[{"id":1,"rank_link":"https://git.openi.org.cn/benchmark/?username=admin&algType=detection","first":"目标检测","second":[{"id":1,"value":"无","attachment":"84cf39c4-d8bc-41aa-aaa3-182ce289b105","owner":"yangzhx","repo_name":"detection_benchmark_script"}]},{"id":2,"rank_link":"https://git.openi.org.cn/benchmark/?username=admin&algType=reid","first":"目标重识别","second":[{"id":1,"value":"车辆重识别","attachment":"84cf39c4-d8bc-41aa-aaa3-182ce289b105","owner":"JiahongXu","repo_name":"benchmark_reID_script"},{"id":2,"value":"基于图像的行人重识别","attachment":"84cf39c4-d8bc-41aa-aaa3-182ce289b105","owner":"JiahongXu","repo_name":"benchmark_reID_script"}]},{"id":3,"rank_link":"https://git.openi.org.cn/benchmark/?username=admin&algType=tracking","first":"多目标跟踪","second":[{"id":1,"value":"无","attachment":"84cf39c4-d8bc-41aa-aaa3-182ce289b105","owner":"lix07","repo_name":"MOT_benchmark_script"}]}]} 
cloudbrain.benchmark.model.types={"type":[{"id":1,"rank_link":"https://git.openi.org.cn/benchmark/?username=admin&algType=detection","first":"目标检测","second":[{"id":1,"value":"无","attachment":"84cf39c4-d8bc-41aa-aaa3-182ce289b105","owner":"yangzhx","repo_name":"detection_benchmark_script"}]},{"id":2,"rank_link":"https://git.openi.org.cn/benchmark/?username=admin&algType=reid","first":"目标重识别","second":[{"id":1,"value":"车辆重识别","attachment":"84cf39c4-d8bc-41aa-aaa3-182ce289b105","owner":"JiahongXu","repo_name":"benchmark_reID_script"},{"id":2,"value":"基于图像的行人重识别","attachment":"84cf39c4-d8bc-41aa-aaa3-182ce289b105","owner":"JiahongXu","repo_name":"benchmark_reID_script"}]},{"id":3,"rank_link":"https://git.openi.org.cn/benchmark/?username=admin&algType=tracking","first":"多目标跟踪","second":[{"id":1,"value":"无","attachment":"84cf39c4-d8bc-41aa-aaa3-182ce289b105","owner":"lix07","repo_name":"MOT_benchmark_script"}]}]} cloudbrain.morethanonejob=您已经创建了一个正在等待或运行中的同类任务,请等待任务结束再创建。 +cloudbrain.morethanonejob1=您已经有 同类任务 正在等待或运行中,请等待任务结束再创建; +cloudbrain.morethanonejob2=可以在 “个人中心 > 云脑任务” 查看您所有的云脑任务。 modelarts.infer_job_model = 模型名称 modelarts.infer_job_model_file = 模型文件 @@ -1238,6 +1242,9 @@ modelarts.infer_job.select_model = 选择模型 modelarts.infer_job.boot_file_helper=启动文件是您程序执行的入口文件,必须是以.py结尾的文件。比如inference.py、main.py、example/inference.py、case/main.py。 modelarts.infer_job.tooltip = 该模型已删除,无法查看。 modelarts.download_log=下载日志文件 +modelarts.log_file=日志文件 +modelarts.fullscreen_log_file=全屏查看 +modelarts.exit_full_screen=退出全屏 modelarts.no_node_right = 计算节点数的值配置错误,您没有权限使用当前配置的计算节点数。 @@ -3194,6 +3201,7 @@ foot.help=帮助 foot.copyright= 版权所有:新一代人工智能开源开放平台(OpenI) Platform_Tutorial=新手指引 foot.advice_feedback = 意见反馈 +resource_description = 资源说明 [cloudbrain] all_resource_cluster=全部集群 @@ -3234,7 +3242,7 @@ view_sample = 查看样例 inference_output_path_rule = 推理输出路径存储在运行参数 result_url 中。 model_file_path_rule = 模型文件位置存储在运行参数 ckpt_url 中。 model_file_postfix_rule = 模型文件支持的格式为 [ckpt, pb, h5, json, pkl, pth, t7, pdparams, onnx, pbtxt, keras, mlmodel, cfg, pt] -model_convert_postfix_rule = 模型文件支持的格式为 [.pth, .pkl, .onnx, .mindir, .ckpt, .pb] +model_convert_postfix_rule = 模型文件支持的格式为 [.pth, .pkl, .onnx, .mindir, .ckpt, .pb, .pdmodel, .pdiparams, .params, .json] delete_task = 删除任务 task_delete_confirm = 你确认删除该任务么?此任务一旦删除不可恢复。 operate_confirm = 确定操作 diff --git a/routers/admin/cloudbrains.go b/routers/admin/cloudbrains.go index d03c00ae6..cbf6782ed 100755 --- a/routers/admin/cloudbrains.go +++ b/routers/admin/cloudbrains.go @@ -98,6 +98,13 @@ func CloudBrains(ctx *context.Context) { ciTasks[i].CanDebug = true ciTasks[i].CanDel = true ciTasks[i].Cloudbrain.ComputeResource = task.ComputeResource + if ciTasks[i].Cloudbrain.Spec != nil { + if ciTasks[i].Cloudbrain.Type == models.TypeC2Net { + ciTasks[i].Cloudbrain.Spec.Cluster = models.C2NetCluster + } else { + ciTasks[i].Cloudbrain.Spec.Cluster = models.OpenICluster + } + } } pager := context.NewPagination(int(count), setting.UI.IssuePagingNum, page, getTotalPage(count, setting.UI.IssuePagingNum)) diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go index 8e1d725ed..69de79c10 100755 --- a/routers/api/v1/api.go +++ b/routers/api/v1/api.go @@ -599,6 +599,11 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("/hours_data", repo.GetCloudbrainsCreateHoursData) m.Get("/waitting_top_data", repo.GetWaittingTop) m.Get("/running_top_data", repo.GetRunningTop) + + m.Get("/overview_resource", repo.GetCloudbrainResourceOverview) + m.Get("/resource_usage_statistic", repo.GetDurationRateStatistic) + 
m.Get("/resource_usage_rate_detail", repo.GetCloudbrainResourceUsageDetail) + m.Get("/apitest_for_statistic", repo.CloudbrainDurationStatisticForTest) }) }, operationReq) @@ -702,6 +707,7 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("/issues/search", repo.SearchIssues) m.Post("/migrate", reqToken(), bind(auth.MigrateRepoForm{}), repo.Migrate) + m.Post("/migrate/submit", reqToken(), bind(auth.MigrateRepoForm{}), repo.MigrateSubmit) m.Group("/:username/:reponame", func() { m.Combo("").Get(reqAnyRepoReader(), repo.Get). diff --git a/routers/api/v1/repo/cloudbrain.go b/routers/api/v1/repo/cloudbrain.go index ba46ab58c..2303ec7ee 100755 --- a/routers/api/v1/repo/cloudbrain.go +++ b/routers/api/v1/repo/cloudbrain.go @@ -172,6 +172,7 @@ func GetCloudBrainInferenceJob(ctx *context.APIContext) { "JobID": jobID, "JobStatus": job.Status, "JobDuration": job.TrainJobDuration, + "StartTime": job.StartTime, }) } @@ -441,6 +442,7 @@ func ModelSafetyGetLog(ctx *context.APIContext) { "Content": result.Content, "Lines": result.Lines, "CanLogDownload": isCanDownloadLog(ctx, job), + "StartTime": job.StartTime, }) } } @@ -572,7 +574,13 @@ func CloudbrainGetLog(ctx *context.APIContext) { startLine = 0 } } + } else { + if startLine > 0 { + startLine += 1 + endLine += 1 + } } + result = getLogFromModelDir(job.JobName, startLine, endLine, resultPath) if result == nil { log.Error("GetJobLog failed: %v", err, ctx.Data["MsgID"]) @@ -595,6 +603,7 @@ func CloudbrainGetLog(ctx *context.APIContext) { "Content": content, "Lines": result["Lines"], "CanLogDownload": result["FileName"] != "", + "StartTime": job.StartTime, } //result := CloudbrainGetLogByJobId(job.JobID, job.JobName) ctx.JSON(http.StatusOK, re) @@ -719,10 +728,10 @@ func getLogFromModelDir(jobName string, startLine int, endLine int, resultPath s line, error := r.ReadString('\n') if error == io.EOF { if i >= startLine { - fileEndLine = i re = re + line count++ } + fileEndLine = i + 1 log.Info("read file completed.") break } @@ -732,13 +741,12 @@ func getLogFromModelDir(jobName string, startLine int, endLine int, resultPath s } if error == nil { if i >= startLine { - fileEndLine = i + fileEndLine = i + 1 re = re + line count++ } } } - fileEndLine = fileEndLine + 1 } else { log.Info("error:" + err.Error()) } diff --git a/routers/api/v1/repo/cloudbrain_dashboard.go b/routers/api/v1/repo/cloudbrain_dashboard.go index c665fe256..d1ccf1bf5 100755 --- a/routers/api/v1/repo/cloudbrain_dashboard.go +++ b/routers/api/v1/repo/cloudbrain_dashboard.go @@ -4,6 +4,7 @@ import ( "fmt" "net/http" "net/url" + "strconv" "strings" "time" @@ -120,9 +121,6 @@ func GetOverviewDuration(ctx *context.Context) { recordBeginTime := recordCloudbrain[0].Cloudbrain.CreatedUnix now := time.Now() endTime := now - page := 1 - pagesize := 10000 - count := pagesize worker_server_num := 1 cardNum := 1 durationAllSum := int64(0) @@ -138,54 +136,46 @@ func GetOverviewDuration(ctx *context.Context) { c2NetDuration := int64(0) cDCenterDuration := int64(0) - for count == pagesize && count != 0 { - cloudbrains, _, err := models.CloudbrainAllStatic(&models.CloudbrainsOptions{ - ListOptions: models.ListOptions{ - Page: page, - PageSize: pagesize, - }, - Type: models.TypeCloudBrainAll, - BeginTimeUnix: int64(recordBeginTime), - EndTimeUnix: endTime.Unix(), - }) - if err != nil { - ctx.ServerError("Get cloudbrains failed:", err) - return - } - models.LoadSpecs4CloudbrainInfo(cloudbrains) + cloudbrains, _, err := models.CloudbrainAllStatic(&models.CloudbrainsOptions{ + Type: models.TypeCloudBrainAll, + 
BeginTimeUnix: int64(recordBeginTime), + EndTimeUnix: endTime.Unix(), + }) + if err != nil { + ctx.ServerError("Get cloudbrains failed:", err) + return + } + models.LoadSpecs4CloudbrainInfo(cloudbrains) - for _, cloudbrain := range cloudbrains { - if cloudbrain.Cloudbrain.WorkServerNumber >= 1 { - worker_server_num = cloudbrain.Cloudbrain.WorkServerNumber - } else { - worker_server_num = 1 - } - if cloudbrain.Cloudbrain.Spec == nil { - cardNum = 1 - } else { - cardNum = cloudbrain.Cloudbrain.Spec.AccCardsNum - } - duration := cloudbrain.Duration - durationSum := cloudbrain.Duration * int64(worker_server_num) * int64(cardNum) - if cloudbrain.Cloudbrain.Type == models.TypeCloudBrainOne { - cloudBrainOneDuration += duration - cloudBrainOneCardDuSum += durationSum - } else if cloudbrain.Cloudbrain.Type == models.TypeCloudBrainTwo { - cloudBrainTwoDuration += duration - cloudBrainTwoCardDuSum += durationSum - } else if cloudbrain.Cloudbrain.Type == models.TypeC2Net { - c2NetDuration += duration - c2NetCardDuSum += durationSum - } else if cloudbrain.Cloudbrain.Type == models.TypeCDCenter { - cDCenterDuration += duration - cDNetCardDuSum += durationSum - } - - durationAllSum += duration - cardDuSum += durationSum - count = len(cloudbrains) - page += 1 + for _, cloudbrain := range cloudbrains { + if cloudbrain.Cloudbrain.WorkServerNumber >= 1 { + worker_server_num = cloudbrain.Cloudbrain.WorkServerNumber + } else { + worker_server_num = 1 } + if cloudbrain.Cloudbrain.Spec == nil { + cardNum = 1 + } else { + cardNum = cloudbrain.Cloudbrain.Spec.AccCardsNum + } + duration := cloudbrain.Duration + durationSum := cloudbrain.Duration * int64(worker_server_num) * int64(cardNum) + if cloudbrain.Cloudbrain.Type == models.TypeCloudBrainOne { + cloudBrainOneDuration += duration + cloudBrainOneCardDuSum += durationSum + } else if cloudbrain.Cloudbrain.Type == models.TypeCloudBrainTwo { + cloudBrainTwoDuration += duration + cloudBrainTwoCardDuSum += durationSum + } else if cloudbrain.Cloudbrain.Type == models.TypeC2Net { + c2NetDuration += duration + c2NetCardDuSum += durationSum + } else if cloudbrain.Cloudbrain.Type == models.TypeCDCenter { + cDCenterDuration += duration + cDNetCardDuSum += durationSum + } + + durationAllSum += duration + cardDuSum += durationSum } ctx.JSON(http.StatusOK, map[string]interface{}{ "cloudBrainOneCardDuSum": cloudBrainOneCardDuSum, @@ -532,6 +522,21 @@ func getPageDateCloudbrainInfo(dateCloudbrainInfo []DateCloudbrainInfo, page int } +func getPageDateCloudbrainDuration(dateUsageStatistic []models.DateUsageStatistic, page int, pagesize int) []models.DateUsageStatistic { + begin := (page - 1) * pagesize + end := (page) * pagesize + + if begin > len(dateUsageStatistic)-1 { + return nil + } + if end > len(dateUsageStatistic)-1 { + return dateUsageStatistic[begin:] + } else { + return dateUsageStatistic[begin:end] + } + +} + func GetAllCloudbrainsPeriodDistribution(ctx *context.Context) { queryType := ctx.QueryTrim("type") beginTimeStr := ctx.QueryTrim("beginTime") @@ -545,7 +550,7 @@ func GetAllCloudbrainsPeriodDistribution(ctx *context.Context) { recordBeginTime := time.Unix(int64(recordCloudbrain[0].Cloudbrain.CreatedUnix), 0) beginTime, endTime, err := getCloudbrainTimePeroid(ctx, recordBeginTime) if err != nil { - log.Error("Parameter is wrong", err) + log.Error("getCloudbrainTimePeroid error:", err) ctx.Error(http.StatusBadRequest, ctx.Tr("repo.parameter_is_wrong")) return } @@ -727,7 +732,7 @@ func GetCloudbrainsDetailData(ctx *context.Context) { keyword := 
strings.Trim(ctx.Query("q"), " ")
-	ciTasks, count, err := models.CloudbrainAll(&models.CloudbrainsOptions{
+	ciTasks, _, err := models.CloudbrainAll(&models.CloudbrainsOptions{
 		ListOptions: models.ListOptions{
 			Page:     page,
 			PageSize: pageSize,
@@ -742,8 +747,8 @@ func GetCloudbrainsDetailData(ctx *context.Context) {
 		NeedRepoInfo:  true,
 		BeginTimeUnix: int64(recordBeginTime),
 		EndTimeUnix:   endTime.Unix(),
-		AiCenter:       aiCenter,
-		NeedDeleteInfo: needDeleteInfo,
+		// AiCenter:       aiCenter,
+		NeedDeleteInfo: needDeleteInfo,
 	})
 	if err != nil {
 		ctx.ServerError("Get job failed:", err)
@@ -753,43 +758,45 @@
 	nilTime := time.Time{}
 	tasks := []models.TaskDetail{}
 	for i, task := range ciTasks {
-		ciTasks[i].Cloudbrain.ComputeResource = task.ComputeResource
-		var taskDetail models.TaskDetail
-		taskDetail.ID = ciTasks[i].Cloudbrain.ID
-		taskDetail.JobID = ciTasks[i].Cloudbrain.JobID
-		taskDetail.JobName = ciTasks[i].JobName
-		taskDetail.DisplayJobName = ciTasks[i].DisplayJobName
-		taskDetail.Status = ciTasks[i].Status
-		taskDetail.JobType = ciTasks[i].JobType
-		taskDetail.CreatedUnix = ciTasks[i].Cloudbrain.CreatedUnix
-		taskDetail.RunTime = ciTasks[i].Cloudbrain.TrainJobDuration
-		taskDetail.StartTime = ciTasks[i].StartTime
-		taskDetail.EndTime = ciTasks[i].EndTime
-		taskDetail.ComputeResource = ciTasks[i].ComputeResource
-		taskDetail.Type = ciTasks[i].Cloudbrain.Type
-		taskDetail.UserName = ciTasks[i].User.Name
-		taskDetail.RepoID = ciTasks[i].RepoID
-		if ciTasks[i].Repo != nil {
-			taskDetail.RepoName = ciTasks[i].Repo.OwnerName + "/" + ciTasks[i].Repo.Name
-			taskDetail.RepoAlias = ciTasks[i].Repo.OwnerName + "/" + ciTasks[i].Repo.Alias
-		}
-		if ciTasks[i].Cloudbrain.WorkServerNumber >= 1 {
-			taskDetail.WorkServerNum = int64(ciTasks[i].Cloudbrain.WorkServerNumber)
-		} else {
-			taskDetail.WorkServerNum = 1
-		}
-		taskDetail.CardDuration = repo.GetCloudbrainCardDuration(ciTasks[i].Cloudbrain)
-		taskDetail.WaitTime = repo.GetCloudbrainWaitTime(ciTasks[i].Cloudbrain)
+		if aiCenter == "" || (task.Cloudbrain.Spec != nil && aiCenter == task.Cloudbrain.Spec.AiCenterCode) {
+			ciTasks[i].Cloudbrain.ComputeResource = task.ComputeResource
+			var taskDetail models.TaskDetail
+			taskDetail.ID = ciTasks[i].Cloudbrain.ID
+			taskDetail.JobID = ciTasks[i].Cloudbrain.JobID
+			taskDetail.JobName = ciTasks[i].JobName
+			taskDetail.DisplayJobName = ciTasks[i].DisplayJobName
+			taskDetail.Status = ciTasks[i].Status
+			taskDetail.JobType = ciTasks[i].JobType
+			taskDetail.CreatedUnix = ciTasks[i].Cloudbrain.CreatedUnix
+			taskDetail.RunTime = ciTasks[i].Cloudbrain.TrainJobDuration
+			taskDetail.StartTime = ciTasks[i].StartTime
+			taskDetail.EndTime = ciTasks[i].EndTime
+			taskDetail.ComputeResource = ciTasks[i].ComputeResource
+			taskDetail.Type = ciTasks[i].Cloudbrain.Type
+			taskDetail.UserName = ciTasks[i].User.Name
+			taskDetail.RepoID = ciTasks[i].RepoID
+			if ciTasks[i].Repo != nil {
+				taskDetail.RepoName = ciTasks[i].Repo.OwnerName + "/" + ciTasks[i].Repo.Name
+				taskDetail.RepoAlias = ciTasks[i].Repo.OwnerName + "/" + ciTasks[i].Repo.Alias
+			}
+			if ciTasks[i].Cloudbrain.WorkServerNumber >= 1 {
+				taskDetail.WorkServerNum = int64(ciTasks[i].Cloudbrain.WorkServerNumber)
+			} else {
+				taskDetail.WorkServerNum = 1
+			}
+			taskDetail.CardDuration = repo.GetCloudbrainCardDuration(ciTasks[i].Cloudbrain)
+			taskDetail.WaitTime = repo.GetCloudbrainWaitTime(ciTasks[i].Cloudbrain)

-		if ciTasks[i].Cloudbrain.DeletedAt != nilTime || ciTasks[i].Repo == nil {
-			taskDetail.IsDelete = true
-		} else {
-			taskDetail.IsDelete = false
+			if ciTasks[i].Cloudbrain.DeletedAt != nilTime || ciTasks[i].Repo == nil {
+				taskDetail.IsDelete = true
+			} else {
+				taskDetail.IsDelete = false
+			}
+			taskDetail.Spec = ciTasks[i].Spec
+			tasks = append(tasks, taskDetail)
 		}
-		taskDetail.Spec = ciTasks[i].Spec
-		tasks = append(tasks, taskDetail)
 	}
-
+	count := int64(len(tasks))
 	pager := context.NewPagination(int(count), pageSize, page, getTotalPage(count, pageSize))
 	pager.SetDefaultParams(ctx)
 	pager.AddParam(ctx, "listType", "ListType")
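
Note: the hunk above drops the SQL-level AiCenter filter and applies it in memory instead, rebuilding the pager from the filtered slice's length. A minimal, self-contained sketch of that filter-then-count pattern (Task and its fields are illustrative stand-ins, not the repository's models):

package main

import "fmt"

// Task stands in for the repository's TaskDetail; illustrative only.
type Task struct {
	ID           int64
	AiCenterCode string
}

// filterByAiCenter keeps tasks whose center matches; an empty filter keeps
// everything, mirroring the aiCenter == "" short-circuit in the handler above.
func filterByAiCenter(tasks []Task, aiCenter string) []Task {
	out := make([]Task, 0, len(tasks))
	for _, t := range tasks {
		if aiCenter == "" || t.AiCenterCode == aiCenter {
			out = append(out, t)
		}
	}
	return out
}

func main() {
	tasks := []Task{{1, "center-a"}, {2, "center-b"}, {3, "center-a"}}
	filtered := filterByAiCenter(tasks, "center-a")
	fmt.Println(len(filtered)) // 2; the pager count comes from this filtered length
}

One trade-off of filtering after the query: the page size applies to the unfiltered rows, so a filtered page can come back shorter than requested.
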
@@ -1403,3 +1410,424 @@ func getCloudbrainTimePeroid(ctx *context.Context, recordBeginTime time.Time) (t
 	return beginTime, endTime, nil
 }
+
+func GetCloudbrainResourceOverview(ctx *context.Context) {
+	recordCloudbrainDuration, err := models.GetDurationRecordBeginTime()
+	if err != nil {
+		log.Error("Can not get GetDurationRecordBeginTime", err)
+		return
+	}
+	recordBeginTime := recordCloudbrainDuration[0].CreatedUnix
+	recordUpdateTime := time.Now().Unix()
+	resourceQueues, err := models.GetCanUseCardInfo()
+	if err != nil {
+		log.Info("GetCanUseCardInfo err: %v", err)
+		return
+	}
+	OpenIResourceDetail := []models.ResourceDetail{}
+	C2NetResourceDetail := []models.ResourceDetail{}
+	for _, resourceQueue := range resourceQueues {
+		if resourceQueue.Cluster == models.OpenICluster {
+			var resourceDetail models.ResourceDetail
+			resourceDetail.QueueCode = resourceQueue.QueueCode
+			resourceDetail.Cluster = resourceQueue.Cluster
+			resourceDetail.AiCenterCode = resourceQueue.AiCenterCode
+			resourceDetail.AiCenterName = resourceQueue.AiCenterName + "/" + resourceQueue.AiCenterCode
+			resourceDetail.ComputeResource = resourceQueue.ComputeResource
+			resourceDetail.AccCardType = resourceQueue.AccCardType + "(" + resourceQueue.ComputeResource + ")"
+			resourceDetail.CardsTotalNum = resourceQueue.CardsTotalNum
+			resourceDetail.IsAutomaticSync = resourceQueue.IsAutomaticSync
+			OpenIResourceDetail = append(OpenIResourceDetail, resourceDetail)
+		}
+		if resourceQueue.Cluster == models.C2NetCluster {
+			var resourceDetail models.ResourceDetail
+			resourceDetail.QueueCode = resourceQueue.QueueCode
+			resourceDetail.Cluster = resourceQueue.Cluster
+			resourceDetail.AiCenterCode = resourceQueue.AiCenterCode
+			resourceDetail.AiCenterName = resourceQueue.AiCenterName + "/" + resourceQueue.AiCenterCode
+			resourceDetail.ComputeResource = resourceQueue.ComputeResource
+			resourceDetail.AccCardType = resourceQueue.AccCardType + "(" + resourceQueue.ComputeResource + ")"
+			resourceDetail.CardsTotalNum = resourceQueue.CardsTotalNum
+			resourceDetail.IsAutomaticSync = resourceQueue.IsAutomaticSync
+			C2NetResourceDetail = append(C2NetResourceDetail, resourceDetail)
+		}
+	}
+	openIResourceNum := make(map[string]map[string]int)
+
+	for _, openIResourceDetail := range OpenIResourceDetail {
+		if _, ok := openIResourceNum[openIResourceDetail.AiCenterName]; !ok {
+			openIResourceNum[openIResourceDetail.AiCenterName] = make(map[string]int)
+		}
+		if _, ok := openIResourceNum[openIResourceDetail.AiCenterName][openIResourceDetail.AccCardType]; !ok {
+			openIResourceNum[openIResourceDetail.AiCenterName][openIResourceDetail.AccCardType] = openIResourceDetail.CardsTotalNum
+		} else {
+			openIResourceNum[openIResourceDetail.AiCenterName][openIResourceDetail.AccCardType] += openIResourceDetail.CardsTotalNum
+		}
+	}
+
+	c2NetResourceNum := make(map[string]map[string]int)
+	for _, c2NetResourceDetail := range C2NetResourceDetail {
+		if _, ok := c2NetResourceNum[c2NetResourceDetail.AiCenterName]; !ok {
+			c2NetResourceNum[c2NetResourceDetail.AiCenterName] =
make(map[string]int) + } + if _, ok := c2NetResourceNum[c2NetResourceDetail.AiCenterName][c2NetResourceDetail.AccCardType]; !ok { + c2NetResourceNum[c2NetResourceDetail.AiCenterName][c2NetResourceDetail.AccCardType] = c2NetResourceDetail.CardsTotalNum + } else { + c2NetResourceNum[c2NetResourceDetail.AiCenterName][c2NetResourceDetail.AccCardType] += c2NetResourceDetail.CardsTotalNum + } + + } + + ctx.JSON(http.StatusOK, map[string]interface{}{ + "openI": openIResourceNum, + "c2Net": c2NetResourceNum, + "recordUpdateTime": recordUpdateTime, + "recordBeginTime": recordBeginTime, + }) +} + +func GetCloudbrainResourceUsageDetail(ctx *context.Context) { + aiCenterCode := ctx.QueryTrim("aiCenterCode") + if aiCenterCode == "" { + aiCenterCode = models.AICenterOfCloudBrainOne + } + beginTime, endTime := getBeginAndEndTime(ctx) + dayCloudbrainDuration, count, err := getDayCloudbrainDuration(beginTime, endTime, aiCenterCode) + if err != nil { + log.Error("Can not query dayCloudbrainDuration.", err) + return + } + hourCloudbrainDuration, err := getHourCloudbrainDuration(beginTime, endTime, aiCenterCode) + if err != nil { + log.Error("Can not query hourCloudbrainDuration.", err) + return + } + page := ctx.QueryInt("page") + if page <= 0 { + page = 1 + } + pagesize := ctx.QueryInt("pagesize") + if pagesize <= 0 { + pagesize = 36500 + } + pageDateCloudbrainDuration := getPageDateCloudbrainDuration(dayCloudbrainDuration, page, pagesize) + ctx.JSON(http.StatusOK, map[string]interface{}{ + "totalCount": count, + "pageDateCloudbrainDuration": pageDateCloudbrainDuration, + "hourCloudbrainDuration": hourCloudbrainDuration, + }) +} + +func GetDurationRateStatistic(ctx *context.Context) { + beginTime, endTime := getBeginAndEndTime(ctx) + OpenIDurationRate, C2NetDurationRate, totalUsageRate := getDurationStatistic(beginTime, endTime) + + ctx.JSON(http.StatusOK, map[string]interface{}{ + "openIDurationRate": OpenIDurationRate, + "c2NetDurationRate": C2NetDurationRate, + "totalUsageRate": totalUsageRate, + }) + +} + +func CloudbrainDurationStatisticForTest(ctx *context.Context) { + repo.CloudbrainDurationStatisticHour() + ctx.JSON(http.StatusOK, map[string]interface{}{ + "message": 0, + }) +} + +func getBeginAndEndTime(ctx *context.Context) (time.Time, time.Time) { + queryType := ctx.QueryTrim("type") + now := time.Now() + beginTimeStr := ctx.QueryTrim("beginTime") + endTimeStr := ctx.QueryTrim("endTime") + + var beginTime time.Time + var endTime time.Time + var err error + if queryType != "" { + if queryType == "all" { + recordCloudbrainDuration, err := models.GetDurationRecordBeginTime() + if err != nil { + log.Error("Can not get GetDurationRecordBeginTime", err) + ctx.Error(http.StatusBadRequest, ctx.Tr("repo.record_begintime_get_err")) + return beginTime, endTime + } + brainRecordBeginTime := recordCloudbrainDuration[0].CreatedUnix.AsTime() + beginTime = brainRecordBeginTime + endTime = now + } else if queryType == "today" { + beginTime = now.AddDate(0, 0, 0) + beginTime = time.Date(beginTime.Year(), beginTime.Month(), beginTime.Day(), 0, 0, 0, 0, now.Location()) + endTime = now + + } else if queryType == "yesterday" { + beginTime = now.AddDate(0, 0, -1) + beginTime = time.Date(beginTime.Year(), beginTime.Month(), beginTime.Day(), 0, 0, 0, 0, now.Location()) + endTime = time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + } else if queryType == "last_7day" { + beginTime = now.AddDate(0, 0, -6) + beginTime = time.Date(beginTime.Year(), beginTime.Month(), beginTime.Day(), 0, 0, 0, 0, 
now.Location())
+		endTime = now
+	} else if queryType == "last_30day" {
+		beginTime = now.AddDate(0, 0, -29)
+		beginTime = time.Date(beginTime.Year(), beginTime.Month(), beginTime.Day(), 0, 0, 0, 0, now.Location())
+		endTime = now
+	} else if queryType == "current_month" {
+		endTime = now
+		beginTime = time.Date(endTime.Year(), endTime.Month(), 1, 0, 0, 0, 0, now.Location())
+
+	} else if queryType == "current_year" {
+		endTime = now
+		beginTime = time.Date(endTime.Year(), 1, 1, 0, 0, 0, 0, now.Location())
+	} else if queryType == "last_month" {
+		lastMonthTime := now.AddDate(0, -1, 0)
+		beginTime = time.Date(lastMonthTime.Year(), lastMonthTime.Month(), 1, 0, 0, 0, 0, now.Location())
+		endTime = time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location())
+	}
+
+	} else {
+		if beginTimeStr == "" || endTimeStr == "" {
+			// If neither the query type nor the begin/end time is set, fall back to queryType=all
+			recordCloudbrainDuration, err := models.GetDurationRecordBeginTime()
+			if err != nil {
+				log.Error("Can not get recordCloudbrain", err)
+				ctx.Error(http.StatusBadRequest, ctx.Tr("repo.record_begintime_get_err"))
+				return beginTime, endTime
+			}
+			brainRecordBeginTime := recordCloudbrainDuration[0].CreatedUnix.AsTime()
+			beginTime = brainRecordBeginTime
+			endTime = now
+		} else {
+			beginTime, err = time.ParseInLocation("2006-01-02", beginTimeStr, time.Local)
+			if err != nil {
+				log.Error("Can not ParseInLocation.", err)
+				ctx.Error(http.StatusBadRequest, ctx.Tr("ParseInLocation_get_error"))
+				return beginTime, endTime
+			}
+			endTime, err = time.ParseInLocation("2006-01-02", endTimeStr, time.Local)
+			if err != nil {
+				log.Error("Can not ParseInLocation.", err)
+				ctx.Error(http.StatusBadRequest, ctx.Tr("ParseInLocation_get_error"))
+				return beginTime, endTime
+			}
+			if endTime.After(time.Now()) {
+				endTime = time.Now()
+			}
+		}
+
+	}
+	return beginTime, endTime
+}
+
+func getAiCenterUsageDuration(beginTime time.Time, endTime time.Time, cloudbrainStatistics []*models.CloudbrainDurationStatistic) (int, int, float64) {
+	totalDuration := int(0)
+	usageDuration := int(0)
+	usageRate := float64(0)
+
+	for _, cloudbrainStatistic := range cloudbrainStatistics {
+		if int64(cloudbrainStatistic.CreatedUnix) >= beginTime.Unix() && int64(cloudbrainStatistic.CreatedUnix) < endTime.Unix() {
+			totalDuration += cloudbrainStatistic.CardsTotalDuration
+			usageDuration += cloudbrainStatistic.CardsUseDuration
+		}
+	}
+	if totalDuration == 0 || usageDuration == 0 {
+		usageRate = 0
+	} else {
+		usageRate = float64(usageDuration) / float64(totalDuration)
+	}
+
+	return totalDuration, usageDuration, usageRate
+}
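
Note: getAiCenterUsageDuration above reduces per-center card statistics to a used/total ratio while guarding the zero-capacity case. The same computation as a standalone sketch (function and variable names are illustrative):

package main

import "fmt"

// usageRate returns used/total as a fraction and avoids dividing by zero
// when a center reports no capacity for the queried window.
func usageRate(usedDuration, totalDuration int) float64 {
	if totalDuration == 0 {
		return 0
	}
	return float64(usedDuration) / float64(totalDuration)
}

func main() {
	fmt.Printf("%.2f\n", usageRate(30, 120)) // 0.25
	fmt.Printf("%.2f\n", usageRate(0, 0))    // 0.00, no division by zero
}
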
+
+func getDurationStatistic(beginTime time.Time, endTime time.Time) (models.DurationRateStatistic, models.DurationRateStatistic, float64) {
+	OpenITotalDuration := make(map[string]int)
+	OpenIUsageDuration := make(map[string]int)
+	OpenIUsageRate := make(map[string]float64)
+
+	C2NetTotalDuration := make(map[string]int)
+	C2NetUsageDuration := make(map[string]int)
+	OpenIDurationRate := models.DurationRateStatistic{}
+	C2NetDurationRate := models.DurationRateStatistic{}
+	cardDurationStatistics, err := models.GetCardDurationStatistics(&models.DurationStatisticOptions{
+		BeginTime: beginTime,
+		EndTime:   endTime,
+	})
+	if err != nil {
+		log.Error("GetCardDurationStatistics error:", err)
+		return OpenIDurationRate, C2NetDurationRate, 0
+	}
+	for _, cloudbrainStatistic := range cardDurationStatistics {
+		if cloudbrainStatistic.Cluster == models.OpenICluster {
+			if _, ok := OpenITotalDuration[cloudbrainStatistic.AiCenterName]; !ok {
+				OpenITotalDuration[cloudbrainStatistic.AiCenterName] = cloudbrainStatistic.CardsTotalDuration
+			} else {
+				OpenITotalDuration[cloudbrainStatistic.AiCenterName] += cloudbrainStatistic.CardsTotalDuration
+			}
+			if _, ok := OpenIUsageDuration[cloudbrainStatistic.AiCenterName]; !ok {
+				OpenIUsageDuration[cloudbrainStatistic.AiCenterName] = cloudbrainStatistic.CardsUseDuration
+			} else {
+				OpenIUsageDuration[cloudbrainStatistic.AiCenterName] += cloudbrainStatistic.CardsUseDuration
+			}
+		}
+		if cloudbrainStatistic.Cluster == models.C2NetCluster {
+			if _, ok := C2NetTotalDuration[cloudbrainStatistic.AiCenterName]; !ok {
+				C2NetTotalDuration[cloudbrainStatistic.AiCenterName] = cloudbrainStatistic.CardsTotalDuration
+			} else {
+				C2NetTotalDuration[cloudbrainStatistic.AiCenterName] += cloudbrainStatistic.CardsTotalDuration
+			}
+			if _, ok := C2NetUsageDuration[cloudbrainStatistic.AiCenterName]; !ok {
+				C2NetUsageDuration[cloudbrainStatistic.AiCenterName] = cloudbrainStatistic.CardsUseDuration
+			} else {
+				C2NetUsageDuration[cloudbrainStatistic.AiCenterName] += cloudbrainStatistic.CardsUseDuration
+			}
+		}
+	}
+	ResourceAiCenterRes, err := models.GetResourceAiCenters()
+	if err != nil {
+		log.Error("Can not get ResourceAiCenterRes.", err)
+		return OpenIDurationRate, C2NetDurationRate, 0
+	}
+	for _, v := range ResourceAiCenterRes {
+		if cutString(v.AiCenterCode, 4) == cutString(models.AICenterOfCloudBrainOne, 4) {
+			if _, ok := OpenIUsageDuration[v.AiCenterName]; !ok {
+				OpenIUsageDuration[v.AiCenterName] = 0
+			}
+			if _, ok := OpenITotalDuration[v.AiCenterName]; !ok {
+				OpenITotalDuration[v.AiCenterName] = 0
+			}
+		} else {
+			if _, ok := C2NetUsageDuration[v.AiCenterName]; !ok {
+				C2NetUsageDuration[v.AiCenterName] = 0
+			}
+		}
+	}
+	totalCanUse := float64(0)
+	totalUse := float64(0)
+	totalUsageRate := float64(0)
+	for k, v := range OpenITotalDuration {
+		for i, j := range OpenIUsageDuration {
+			if k == i {
+				if v == 0 || j == 0 {
+					OpenIUsageRate[k] = 0
+				} else {
+					OpenIUsageRate[k] = float64(j) / float64(v)
+				}
+			}
+		}
+	}
+	for _, v := range OpenITotalDuration {
+		totalCanUse += float64(v)
+	}
+	for _, v := range OpenIUsageDuration {
+		totalUse += float64(v)
+	}
+	if totalCanUse == 0 || totalUse == 0 {
+		totalUsageRate = 0
+	} else {
+		totalUsageRate = totalUse / totalCanUse
+	}
+
+	OpenIDurationRate.AiCenterTotalDurationStat = OpenITotalDuration
+	OpenIDurationRate.AiCenterUsageDurationStat = OpenIUsageDuration
+	OpenIDurationRate.UsageRate = OpenIUsageRate
+	C2NetDurationRate.AiCenterTotalDurationStat = C2NetTotalDuration
+	C2NetDurationRate.AiCenterUsageDurationStat = C2NetUsageDuration
+	return OpenIDurationRate, C2NetDurationRate, totalUsageRate
+}
+
+func cutString(str string, lens int) string {
+	if len(str) < lens {
+		return str
+	}
+	return str[:lens]
+}
+
+func getDayCloudbrainDuration(beginTime time.Time, endTime time.Time, aiCenterCode string) ([]models.DateUsageStatistic, int, error) {
+	now := time.Now()
+	endTimeTemp := time.Date(endTime.Year(), endTime.Month(), endTime.Day(), 0, 0, 0, 0, now.Location())
+	if endTimeTemp.Equal(endTime) {
+		endTimeTemp = endTimeTemp.AddDate(0, 0, -1)
+	}
+	cardDurationStatistics, err := models.GetCardDurationStatistics(&models.DurationStatisticOptions{
+		BeginTime:    beginTime,
+		EndTime:      endTime,
+		AiCenterCode: aiCenterCode,
+	})
+	if err != nil {
+		log.Error("GetCardDurationStatistics error:", err)
+		return nil, 0, err
+	}
+
+	dayCloudbrainInfo := make([]models.DateUsageStatistic, 0)
+	count := 0
+	for beginTime.Before(endTimeTemp) || beginTime.Equal(endTimeTemp) {
+		TotalDuration, UsageDuration, UsageRate :=
getAiCenterUsageDuration(endTimeTemp, endTime, cardDurationStatistics) + dayCloudbrainInfo = append(dayCloudbrainInfo, models.DateUsageStatistic{ + Date: endTimeTemp.Format("2006/01/02"), + UsageDuration: UsageDuration, + TotalDuration: TotalDuration, + UsageRate: UsageRate, + }) + endTime = endTimeTemp + endTimeTemp = endTimeTemp.AddDate(0, 0, -1) + if endTimeTemp.Before(beginTime) && beginTime.Before(endTime) { + endTimeTemp = beginTime + } + count += 1 + } + return dayCloudbrainInfo, count, nil +} + +func getHourCloudbrainDuration(beginTime time.Time, endTime time.Time, aiCenterCode string) (models.HourTimeStatistic, error) { + hourTimeTotalDuration := make(map[string]int) + hourTimeUsageDuration := make(map[string]int) + hourTimeUsageRate := make(map[string]float64) + hourTimeStatistic := models.HourTimeStatistic{} + + cardDurationStatistics, err := models.GetCardDurationStatistics(&models.DurationStatisticOptions{ + BeginTime: beginTime, + EndTime: endTime, + }) + if err != nil { + log.Error("GetCardDurationStatistics error:", err) + return hourTimeStatistic, err + } + for _, cloudbrainStatistic := range cardDurationStatistics { + if cloudbrainStatistic.AiCenterCode == aiCenterCode { + if _, ok := hourTimeTotalDuration[strconv.Itoa(cloudbrainStatistic.HourTime)]; !ok { + hourTimeTotalDuration[strconv.Itoa(cloudbrainStatistic.HourTime)] = cloudbrainStatistic.CardsTotalDuration + } else { + hourTimeTotalDuration[strconv.Itoa(cloudbrainStatistic.HourTime)] += cloudbrainStatistic.CardsTotalDuration + } + if _, ok := hourTimeUsageDuration[strconv.Itoa(cloudbrainStatistic.HourTime)]; !ok { + hourTimeUsageDuration[strconv.Itoa(cloudbrainStatistic.HourTime)] = cloudbrainStatistic.CardsUseDuration + } else { + hourTimeUsageDuration[strconv.Itoa(cloudbrainStatistic.HourTime)] += cloudbrainStatistic.CardsUseDuration + } + } + } + hourTimeList := []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23"} + for _, v := range hourTimeList { + if _, ok := hourTimeUsageDuration[v]; !ok { + hourTimeUsageDuration[v] = 0 + } + if _, ok := hourTimeTotalDuration[v]; !ok { + hourTimeTotalDuration[v] = 0 + } + } + + for k, v := range hourTimeTotalDuration { + for i, j := range hourTimeUsageDuration { + if k == i { + if v == 0 || j == 0 { + hourTimeUsageRate[k] = 0 + } else { + hourTimeUsageRate[k] = float64(j) / float64(v) + } + } + } + } + + hourTimeStatistic.HourTimeTotalDuration = hourTimeTotalDuration + hourTimeStatistic.HourTimeUsageDuration = hourTimeUsageDuration + hourTimeStatistic.HourTimeUsageRate = hourTimeUsageRate + return hourTimeStatistic, nil +} diff --git a/routers/api/v1/repo/migrate.go b/routers/api/v1/repo/migrate.go index fd0db7814..2f28b0bd3 100644 --- a/routers/api/v1/repo/migrate.go +++ b/routers/api/v1/repo/migrate.go @@ -6,6 +6,8 @@ package repo import ( "bytes" + "code.gitea.io/gitea/modules/task" + "code.gitea.io/gitea/routers/response" "errors" "fmt" "net/http" @@ -216,3 +218,146 @@ func handleMigrateError(ctx *context.APIContext, repoOwner *models.User, remoteA } } } + +func MigrateSubmit(ctx *context.APIContext, form auth.MigrateRepoForm) { + log.Info("receive MigrateSubmit request") + ctxUser, bizErr := checkContextUser(ctx, form.UID) + if bizErr != nil { + ctx.JSON(http.StatusOK, response.ResponseError(bizErr)) + return + } + + remoteAddr, err := form.ParseRemoteAddr(ctx.User) + if err != nil { + if models.IsErrInvalidCloneAddr(err) { + addrErr := err.(models.ErrInvalidCloneAddr) + switch { + 
case addrErr.IsURLError: + ctx.JSON(http.StatusOK, response.PARAM_ERROR) + case addrErr.IsPermissionDenied: + ctx.JSON(http.StatusOK, response.INSUFFICIENT_PERMISSION) + case addrErr.IsInvalidPath: + ctx.JSON(http.StatusOK, response.PARAM_ERROR) + default: + ctx.JSON(http.StatusOK, response.SYSTEM_ERROR) + } + } else { + ctx.JSON(http.StatusOK, response.SYSTEM_ERROR) + } + return + } + + var gitServiceType = api.PlainGitService + u, err := url.Parse(form.CloneAddr) + if err == nil && strings.EqualFold(u.Host, "github.com") { + gitServiceType = api.GithubService + } + + var opts = migrations.MigrateOptions{ + OriginalURL: form.CloneAddr, + GitServiceType: gitServiceType, + CloneAddr: remoteAddr, + RepoName: form.RepoName, + Alias: form.Alias, + Description: form.Description, + Private: form.Private || setting.Repository.ForcePrivate, + Mirror: form.Mirror, + AuthUsername: form.AuthUsername, + AuthPassword: form.AuthPassword, + Wiki: form.Wiki, + Issues: form.Issues, + Milestones: form.Milestones, + Labels: form.Labels, + Comments: true, + PullRequests: form.PullRequests, + Releases: form.Releases, + } + if opts.Mirror { + opts.Issues = false + opts.Milestones = false + opts.Labels = false + opts.Comments = false + opts.PullRequests = false + opts.Releases = false + } + + err = models.CheckCreateRepository(ctx.User, ctxUser, opts.RepoName, opts.Alias) + if err != nil { + handleMigrateError4Api(ctx, ctxUser, remoteAddr, err) + return + } + + err = task.MigrateRepository(ctx.User, ctxUser, opts) + if err == nil { + r := make(map[string]string) + r["OpenIUrl"] = strings.TrimSuffix(setting.AppURL, "/") + "/" + ctxUser.Name + "/" + opts.RepoName + r["OriginUrl"] = form.CloneAddr + ctx.JSON(http.StatusOK, response.SuccessWithData(r)) + return + } + + handleMigrateError4Api(ctx, ctxUser, remoteAddr, err) +} + +func checkContextUser(ctx *context.APIContext, uid int64) (*models.User, *response.BizError) { + if uid == ctx.User.ID || uid == 0 { + return ctx.User, nil + } + + org, err := models.GetUserByID(uid) + if models.IsErrUserNotExist(err) { + return ctx.User, nil + } + + if err != nil { + return nil, response.SYSTEM_ERROR + } + + // Check ownership of organization. 
+	if !org.IsOrganization() {
+		return nil, response.INSUFFICIENT_PERMISSION
+	}
+	if !ctx.User.IsAdmin {
+		canCreate, err := org.CanCreateOrgRepo(ctx.User.ID)
+		if err != nil {
+			return nil, response.NewBizError(err)
+		} else if !canCreate {
+			return nil, response.INSUFFICIENT_PERMISSION
+		}
+	}
+	return org, nil
+}
+
+func handleMigrateError4Api(ctx *context.APIContext, repoOwner *models.User, remoteAddr string, err error) {
+	switch {
+	case models.IsErrRepoAlreadyExist(err):
+		ctx.JSON(http.StatusOK, response.Error(3, "The repository with the same name already exists."))
+	case migrations.IsRateLimitError(err):
+		ctx.JSON(http.StatusOK, response.ServerError("Remote visit addressed rate limitation."))
+	case migrations.IsTwoFactorAuthError(err):
+		ctx.JSON(http.StatusOK, response.ServerError("Remote visit required two factors authentication."))
+	case models.IsErrReachLimitOfRepo(err):
+		ctx.JSON(http.StatusOK, response.ServerError(fmt.Sprintf("You have already reached your limit of %d repositories.", repoOwner.MaxCreationLimit())))
+	case models.IsErrNameReserved(err):
+		ctx.JSON(http.StatusOK, response.ServerError(fmt.Sprintf("The username '%s' is reserved.", err.(models.ErrNameReserved).Name)))
+	case models.IsErrNameCharsNotAllowed(err):
+		ctx.JSON(http.StatusOK, response.ServerError(fmt.Sprintf("The username '%s' contains invalid characters.", err.(models.ErrNameCharsNotAllowed).Name)))
+	case models.IsErrNamePatternNotAllowed(err):
+		ctx.JSON(http.StatusOK, response.ServerError(fmt.Sprintf("The pattern '%s' is not allowed in a username.", err.(models.ErrNamePatternNotAllowed).Pattern)))
+	default:
+		err = util.URLSanitizedError(err, remoteAddr)
+		if strings.Contains(err.Error(), "Authentication failed") ||
+			strings.Contains(err.Error(), "Bad credentials") ||
+			strings.Contains(err.Error(), "could not read Username") {
+			ctx.JSON(http.StatusOK, response.ServerError(fmt.Sprintf("Authentication failed: %v.", err)))
+		} else if strings.Contains(err.Error(), "fatal:") {
+			ctx.JSON(http.StatusOK, response.ServerError(fmt.Sprintf("Migration failed: %v.", err)))
+		} else {
+			ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
+		}
+	}
+}
+
+func QueryRepoSatus(ctx *context.APIContext, form auth.MigrateRepoForm) {
+
+}
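
Note: handleMigrateError4Api above folds every migration failure into an HTTP 200 response that carries a business code and message, rather than distinct HTTP status codes. A minimal sketch of that classification pattern (apiResult and the codes are illustrative stand-ins, not the repository's response package):

package main

import (
	"errors"
	"fmt"
)

// apiResult is an illustrative stand-in for the JSON envelope returned to clients.
type apiResult struct {
	Code int
	Msg  string
}

var errRepoExists = errors.New("repository already exists")

// classify maps a failure onto a stable business code, in the spirit of
// handleMigrateError4Api; unknown errors fall through to a generic code.
func classify(err error) apiResult {
	switch {
	case errors.Is(err, errRepoExists):
		return apiResult{Code: 3, Msg: "The repository with the same name already exists."}
	default:
		return apiResult{Code: 1, Msg: err.Error()}
	}
}

func main() {
	fmt.Println(classify(errRepoExists))
	fmt.Println(classify(errors.New("fatal: could not read Username")))
}

Returning 200 with a body-level code keeps client handling uniform, at the cost of making failures invisible to plain HTTP monitoring.
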
diff --git a/routers/api/v1/repo/modelarts.go b/routers/api/v1/repo/modelarts.go
index 79e35812e..5a0e21ed8 100755
--- a/routers/api/v1/repo/modelarts.go
+++ b/routers/api/v1/repo/modelarts.go
@@ -12,6 +12,8 @@ import (
 	"strconv"
 	"strings"
+	"code.gitea.io/gitea/modules/urfs_client/urchin"
+
 	"code.gitea.io/gitea/modules/notification"
 	"code.gitea.io/gitea/modules/grampus"
@@ -25,6 +27,7 @@ import (
 	"code.gitea.io/gitea/modules/storage"
 	"code.gitea.io/gitea/modules/timeutil"
 	routerRepo "code.gitea.io/gitea/routers/repo"
+	cloudbrainService "code.gitea.io/gitea/services/cloudbrain"
 )
 
 func GetModelArtsNotebook2(ctx *context.APIContext) {
@@ -49,6 +52,7 @@
 		"JobName":     job.JobName,
 		"JobStatus":   job.Status,
 		"JobDuration": job.TrainJobDuration,
+		"StartTime":   job.StartTime,
 	})
 }
 
@@ -169,17 +173,20 @@ func GetModelArtsTrainJobVersion(ctx *context.APIContext) {
 	if len(result.JobInfo.Tasks) > 0 {
 		if len(result.JobInfo.Tasks[0].CenterID) > 0 && len(result.JobInfo.Tasks[0].CenterName) > 0 {
 			job.AiCenter = result.JobInfo.Tasks[0].CenterID[0] + "+" + result.JobInfo.Tasks[0].CenterName[0]
-			aiCenterName = result.JobInfo.Tasks[0].CenterName[0]
+			// aiCenterName = result.JobInfo.Tasks[0].CenterName[0]
+			aiCenterName = cloudbrainService.GetAiCenterShow(job.AiCenter, ctx.Context)
 		}
 	} else {
-		temp := strings.Split(job.AiCenter, "+")
-		if len(temp) > 1 {
-			aiCenterName = temp[1]
-		}
+		aiCenterName = cloudbrainService.GetAiCenterShow(job.AiCenter, ctx.Context)
 	}
 	if oldStatus != job.Status {
 		notification.NotifyChangeCloudbrainStatus(job, oldStatus)
+		if models.IsTrainJobTerminal(job.Status) && job.ComputeResource == models.NPUResource {
+			if len(result.JobInfo.Tasks) > 0 && len(result.JobInfo.Tasks[0].CenterID) == 1 {
+				urchin.GetBackNpuModel(job.ID, grampus.GetRemoteEndPoint(result.JobInfo.Tasks[0].CenterID[0]), grampus.BucketRemote, grampus.GetNpuModelObjectKey(job.JobName), grampus.GetCenterProxy(setting.Grampus.LocalCenterID))
+			}
+		}
 	}
 	err = models.UpdateTrainJobVersion(job)
 	if err != nil {
@@ -192,6 +199,7 @@
 		"JobStatus":   job.Status,
 		"JobDuration": job.TrainJobDuration,
 		"AiCenter":    aiCenterName,
+		"StartTime":   job.StartTime,
 	})
 }
 
@@ -319,6 +327,7 @@ func TrainJobGetLog(ctx *context.APIContext) {
 		"Content":        result.Content,
 		"Lines":          result.Lines,
 		"CanLogDownload": canLogDownload,
+		"StartTime":      task.StartTime,
 	})
 }
 
@@ -458,14 +467,47 @@ func ModelList(ctx *context.APIContext) {
 		return
 	}
 
+	status := models.StorageScheduleSucceed
 	var fileInfos []storage.FileInfo
 	if task.ComputeResource == models.NPUResource {
-		fileInfos, err = storage.GetObsListObject(task.JobName, "output/", parentDir, versionName)
+		prefix := strings.TrimPrefix(path.Join(setting.TrainJobModelPath, task.JobName, setting.OutPutPath, versionName), "/")
+		if !strings.HasSuffix(prefix, "/") {
+			prefix += "/"
+		}
+		fileInfos, err = storage.GetOneLevelAllObjectUnderDir(setting.Bucket, prefix, parentDir)
 		if err != nil {
 			log.Info("get TrainJobListModel failed:", err)
 			ctx.ServerError("GetObsListObject:", err)
 			return
 		}
+
+		if task.Type == models.TypeC2Net {
+			if len(fileInfos) > 0 {
+				status = models.StorageScheduleSucceed
+			} else {
+				if models.IsTrainJobTerminal(task.Status) {
+					if task.Status == models.GrampusStatusStopped {
+						status = models.StorageNoFile
+					} else if task.Status == models.GrampusStatusFailed {
+						if task.AiCenter == "" {
+							status = models.StorageNoFile
+						}
+					} else {
+						record, _ := models.GetScheduleRecordByCloudbrainID(task.ID)
+						if record != nil {
+							status = record.Status
+							if status == models.StorageScheduleSucceed {
+								status = models.StorageNoFile
+							}
+						} else {
+							status = models.StorageScheduleProcessing
+						}
+					}
+				} else {
+					status = models.StorageScheduleWaiting
+				}
+			}
+		}
 	} else if task.ComputeResource == models.GPUResource {
 		files, err := routerRepo.GetModelDirs(task.JobName, parentDir)
 		if err != nil {
@@ -485,7 +527,7 @@
 	ctx.JSON(http.StatusOK, map[string]interface{}{
 		"JobID":       jobID,
 		"VersionName": versionName,
-		"StatusOK":    0,
+		"StatusOK":    status,
 		"Path":        dirArray,
 		"Dirs":        fileInfos,
 		"task":        task,
@@ -514,6 +556,7 @@ func GetModelArtsInferenceJob(ctx *context.APIContext) {
 		"JobID":       jobID,
 		"JobStatus":   job.Status,
 		"JobDuration": job.TrainJobDuration,
+		"StartTime":   job.StartTime,
 	})
 }

diff --git a/routers/badge/badge.go b/routers/badge/badge.go
new file mode 100644
index 000000000..6d8725b12
--- /dev/null
+++ b/routers/badge/badge.go
@@ -0,0 +1,136 @@
+package badge
+
+import (
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/context"
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/setting"
+	"code.gitea.io/gitea/routers/response"
+	"code.gitea.io/gitea/services/badge"
+	"errors"
+	"github.com/unknwon/com"
+
"net/http" + "strings" +) + +func GetCustomizeBadgeList(ctx *context.Context) { + page := ctx.QueryInt("page") + category := ctx.QueryInt64("category") + pageSize := 50 + n, r, err := badge.GetBadgeList(models.GetBadgeOpts{CategoryId: category, BadgeType: models.CustomizeBadge, ListOpts: models.ListOptions{PageSize: pageSize, Page: page}}) + if err != nil { + log.Error("GetCustomizeBadgeList error.%v", err) + ctx.JSON(http.StatusOK, response.ServerError(err.Error())) + return + } + m := make(map[string]interface{}) + m["List"] = r + m["Total"] = n + m["PageSize"] = pageSize + ctx.JSON(http.StatusOK, response.SuccessWithData(m)) +} + +func OperateBadge(ctx *context.Context, req models.BadgeOperateReq) { + action := ctx.Params(":action") + + var err *response.BizError + switch action { + case "edit": + err = badge.EditBadge(req, ctx.User) + case "new": + err = badge.AddBadge(req, ctx.User) + case "del": + err = badge.DelBadge(req.ID, ctx.User) + default: + err = response.NewBizError(errors.New("action type error")) + } + + if err != nil { + log.Error("OperateBadge error ,%v", err) + ctx.JSON(http.StatusOK, response.ResponseError(err)) + return + } + ctx.JSON(http.StatusOK, response.Success()) +} + +func GetBadgeUsers(ctx *context.Context) { + page := ctx.QueryInt("page") + badgeId := ctx.QueryInt64("badge") + pageSize := 50 + n, r, err := badge.GetBadgeUsers(badgeId, models.ListOptions{PageSize: pageSize, Page: page}) + if err != nil { + log.Error("GetBadgeUsers error.%v", err) + ctx.JSON(http.StatusOK, response.ServerError(err.Error())) + return + } + m := make(map[string]interface{}) + m["List"] = r + m["Total"] = n + m["PageSize"] = pageSize + ctx.JSON(http.StatusOK, response.SuccessWithData(m)) +} + +func AddOperateBadgeUsers(ctx *context.Context, req models.AddBadgeUsersReq) { + userStr := req.Users + if userStr == "" { + ctx.JSON(http.StatusOK, response.Success()) + return + } + userStr = strings.ReplaceAll(userStr, " ", "") + userStr = strings.ReplaceAll(userStr, "\r", "") + userNames := strings.Split(userStr, "\n") + n, err := badge.AddBadgeUsers(req.BadgeId, userNames) + if err != nil { + log.Error("AddOperateBadgeUsers error.%v", err) + ctx.JSON(http.StatusOK, response.ServerError(err.Error())) + return + } + m := make(map[string]interface{}) + m["Total"] = len(userNames) + m["Success"] = n + ctx.JSON(http.StatusOK, response.SuccessWithData(m)) +} + +func DelBadgeUsers(ctx *context.Context, req models.DelBadgeUserReq) { + id := req.ID + if id <= 0 { + ctx.JSON(http.StatusOK, response.Success()) + return + } + + err := badge.DelBadgeUser(id) + if err != nil { + log.Error("DelBadgeUsers error.%v", err) + ctx.JSON(http.StatusOK, response.ServerError(err.Error())) + return + } + ctx.JSON(http.StatusOK, response.Success()) +} + +func UploadIcon(ctx *context.Context, form badge.IconUploadForm) { + + uploader := badge.NewIconUploader(badge.IconUploadConfig{ + FileMaxSize: setting.BadgeIconMaxFileSize, + FileMaxWidth: setting.BadgeIconMaxWidth, + FileMaxHeight: setting.BadgeIconMaxHeight, + NeedSquare: true, + }) + iconName, err := uploader.Upload(form, ctx.User) + if err != nil { + log.Error("UploadIcon error.%v", err) + ctx.JSON(http.StatusOK, response.ServerError(err.Error())) + return + } + m := make(map[string]string, 0) + m["IconName"] = iconName + ctx.JSON(http.StatusOK, response.SuccessWithData(m)) +} + +func GetIcon(ctx *context.Context) { + hash := ctx.Params(":hash") + if !com.IsFile(models.GetCustomIconByHash(hash)) { + ctx.NotFound(ctx.Req.URL.RequestURI(), nil) + return + } + 
ctx.Redirect(setting.AppSubURL + "/icons/" + hash) +} diff --git a/routers/badge/category.go b/routers/badge/category.go new file mode 100644 index 000000000..71c34e1ba --- /dev/null +++ b/routers/badge/category.go @@ -0,0 +1,50 @@ +package badge + +import ( + "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/context" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/routers/response" + "code.gitea.io/gitea/services/badge" + "errors" + "net/http" +) + +func GetBadgeCategoryList(ctx *context.Context) { + page := ctx.QueryInt("page") + pageSize := 50 + n, r, err := badge.GetBadgeCategoryList(models.ListOptions{Page: page, PageSize: pageSize}) + if err != nil { + log.Error("GetCategoryList error.%v", err) + ctx.JSON(http.StatusOK, response.ServerError(err.Error())) + return + } + m := make(map[string]interface{}) + m["List"] = r + m["Total"] = n + m["PageSize"] = pageSize + ctx.JSON(http.StatusOK, response.SuccessWithData(m)) +} + +func OperateBadgeCategory(ctx *context.Context, category models.BadgeCategory4Show) { + action := ctx.Params(":action") + + var err *response.BizError + switch action { + case "edit": + err = badge.EditBadgeCategory(category, ctx.User) + case "new": + err = badge.AddBadgeCategory(category, ctx.User) + case "del": + err = badge.DelBadgeCategory(category.ID, ctx.User) + default: + err = response.NewBizError(errors.New("action type error")) + } + + if err != nil { + log.Error("OperateBadgeCategory error ,%v", err) + ctx.JSON(http.StatusOK, response.ResponseError(err)) + return + } + ctx.JSON(http.StatusOK, response.Success()) +} diff --git a/routers/home.go b/routers/home.go index ac607b5be..aab760611 100755 --- a/routers/home.go +++ b/routers/home.go @@ -41,6 +41,7 @@ const ( tplExploreExploreDataAnalysis base.TplName = "explore/data_analysis" tplHomeTerm base.TplName = "terms" tplHomePrivacy base.TplName = "privacy" + tplResoruceDesc base.TplName = "resource_desc" ) // Home render home page @@ -820,3 +821,7 @@ func HomeTerm(ctx *context.Context) { func HomePrivacy(ctx *context.Context) { ctx.HTML(200, tplHomePrivacy) } + +func HomeResoruceDesc(ctx *context.Context) { + ctx.HTML(200, tplResoruceDesc) +} \ No newline at end of file diff --git a/routers/repo/ai_model_convert.go b/routers/repo/ai_model_convert.go index 9a5874956..bd6a01072 100644 --- a/routers/repo/ai_model_convert.go +++ b/routers/repo/ai_model_convert.go @@ -29,7 +29,9 @@ const ( tplModelConvertInfo = "repo/modelmanage/convertshowinfo" PYTORCH_ENGINE = 0 TENSORFLOW_ENGINE = 1 - MINDSPORE_ENGIN = 2 + MINDSPORE_ENGINE = 2 + PADDLE_ENGINE = 4 + MXNET_ENGINE = 6 ModelMountPath = "/model" CodeMountPath = "/code" DataSetMountPath = "/dataset" @@ -395,6 +397,20 @@ func createGpuTrainJob(modelConvert *models.AiModelConvert, ctx *context.Context deleteLocalDir(relatetiveModelPath) dataActualPath = setting.Attachment.Minio.RealPath + setting.Attachment.Minio.Bucket + "/" + setting.CBCodePathPrefix + modelConvert.ID + "/dataset" } + } else if modelConvert.SrcEngine == PADDLE_ENGINE { + IMAGE_URL = setting.ModelConvert.GPU_PADDLE_IMAGE + if modelConvert.DestFormat == CONVERT_FORMAT_ONNX { + command = getGpuModelConvertCommand(modelConvert.ID, modelConvert.ModelPath, modelConvert, setting.ModelConvert.PaddleOnnxBootFile) + } else { + return errors.New("Not support the format.") + } + } else if modelConvert.SrcEngine == MXNET_ENGINE { + IMAGE_URL = setting.ModelConvert.GPU_MXNET_IMAGE + if modelConvert.DestFormat == CONVERT_FORMAT_ONNX { + command = getGpuModelConvertCommand(modelConvert.ID, 
modelConvert.ModelPath, modelConvert, setting.ModelConvert.MXnetOnnxBootFile) + } else { + return errors.New("Not support the format.") + } } log.Info("dataActualPath=" + dataActualPath) diff --git a/routers/repo/ai_model_manage.go b/routers/repo/ai_model_manage.go index 5b358b83b..f2b0fc6d1 100644 --- a/routers/repo/ai_model_manage.go +++ b/routers/repo/ai_model_manage.go @@ -17,6 +17,7 @@ import ( "code.gitea.io/gitea/modules/notification" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/storage" + "code.gitea.io/gitea/services/cloudbrain/resource" uuid "github.com/satori/go.uuid" ) @@ -69,13 +70,10 @@ func saveModelByParameters(jobId string, versionName string, name string, versio cloudType = models.TypeCloudBrainTwo } else if aiTask.ComputeResource == models.GPUResource { cloudType = models.TypeCloudBrainOne - var ResourceSpecs *models.ResourceSpecs - json.Unmarshal([]byte(setting.ResourceSpecs), &ResourceSpecs) - for _, tmp := range ResourceSpecs.ResourceSpec { - if tmp.Id == aiTask.ResourceSpecId { - flaverName := ctx.Tr("cloudbrain.gpu_num") + ": " + fmt.Sprint(tmp.GpuNum) + " " + ctx.Tr("cloudbrain.cpu_num") + ": " + fmt.Sprint(tmp.CpuNum) + " " + ctx.Tr("cloudbrain.memory") + "(MB): " + fmt.Sprint(tmp.MemMiB) + " " + ctx.Tr("cloudbrain.shared_memory") + "(MB): " + fmt.Sprint(tmp.ShareMemMiB) - aiTask.FlavorName = flaverName - } + spec, err := resource.GetCloudbrainSpec(aiTask.ID) + if err == nil { + flaverName := "GPU: " + fmt.Sprint(spec.AccCardsNum) + "*" + spec.AccCardType + ",CPU: " + fmt.Sprint(spec.CpuCores) + "," + ctx.Tr("cloudbrain.memory") + ": " + fmt.Sprint(spec.MemGiB) + "GB," + ctx.Tr("cloudbrain.shared_memory") + ": " + fmt.Sprint(spec.ShareMemGiB) + "GB" + aiTask.FlavorName = flaverName } } diff --git a/routers/repo/aisafety.go b/routers/repo/aisafety.go index 5102a6722..e274f808e 100644 --- a/routers/repo/aisafety.go +++ b/routers/repo/aisafety.go @@ -26,7 +26,6 @@ import ( "code.gitea.io/gitea/modules/util" "code.gitea.io/gitea/services/cloudbrain/resource" "code.gitea.io/gitea/services/reward/point/account" - uuid "github.com/satori/go.uuid" ) const ( @@ -37,39 +36,6 @@ const ( tplModelSafetyTestShow = "repo/modelsafety/show" ) -func CloudBrainAiSafetyCreateTest(ctx *context.Context) { - log.Info("start to create CloudBrainAiSafetyCreate") - uuid := uuid.NewV4() - id := uuid.String() - seriaNoParas := ctx.Query("serialNo") - fileName := ctx.Query("fileName") - - //if jobType == string(models.JobTypeBenchmark) { - req := aisafety.TaskReq{ - UnionId: id, - EvalName: "test1", - EvalContent: "test1", - TLPath: "test1", - Indicators: []string{"ACC", "ASS"}, - CDName: "CIFAR10_1000_FGSM", - BDName: "CIFAR10_1000基础数据集", - } - aisafety.GetAlgorithmList() - if seriaNoParas != "" { - aisafety.GetTaskStatus(seriaNoParas) - } else { - jsonStr, err := getJsonContent("http://192.168.207.34:8065/Test_zap1234/openi_aisafety/raw/branch/master/result/" + fileName) - serialNo, err := aisafety.CreateSafetyTask(req, jsonStr) - if err == nil { - log.Info("serialNo=" + serialNo) - time.Sleep(time.Duration(2) * time.Second) - aisafety.GetTaskStatus(serialNo) - } else { - log.Info("CreateSafetyTask error," + err.Error()) - } - } -} - func GetAiSafetyTaskByJob(job *models.Cloudbrain) { if job == nil { log.Error("GetCloudbrainByJobID failed") @@ -325,6 +291,7 @@ func queryTaskStatusFromCloudbrain(job *models.Cloudbrain) { } else { // job.Status = string(models.ModelSafetyTesting) + job.EndTime = 0 err = models.UpdateJob(job) if err != nil { log.Error("UpdateJob failed:", 
err)
@@ -341,6 +308,9 @@ func queryTaskStatusFromModelSafetyTestServer(job *models.Cloudbrain) {
 	if result.Data.Status == 1 {
 		log.Info("The task is running....")
 	} else {
+		job.EndTime = timeutil.TimeStampNow()
+		job.Duration = job.EndTime.AsTime().Unix() - job.StartTime.AsTime().Unix()
+		job.TrainJobDuration = models.ConvertDurationToStr(job.Duration)
 		if result.Data.Code == 0 {
 			job.ResultJson = result.Data.StandardJson
 			job.Status = string(models.JobSucceeded)
@@ -474,6 +444,9 @@ func updateJobFailed(job *models.Cloudbrain, msg string) {
 	//update task failed.
 	job.Status = string(models.ModelArtsTrainJobFailed)
 	job.ResultJson = msg
+	job.EndTime = timeutil.TimeStampNow()
+	job.Duration = job.EndTime.AsTime().Unix() - job.StartTime.AsTime().Unix()
+	job.TrainJobDuration = models.ConvertDurationToStr(job.Duration)
 	err := models.UpdateJob(job)
 	if err != nil {
 		log.Error("UpdateJob failed:", err)
@@ -535,6 +508,8 @@ func AiSafetyCreateForGetGPU(ctx *context.Context) {
 	} else {
 		log.Info("The GPU WaitCount not get")
 	}
+	NotStopTaskCount, _ := models.GetModelSafetyCountByUserID(ctx.User.ID)
+	ctx.Data["NotStopTaskCount"] = NotStopTaskCount
 	ctx.HTML(200, tplModelSafetyTestCreateGpu)
 }
 
@@ -578,6 +553,8 @@ func AiSafetyCreateForGetNPU(ctx *context.Context) {
 	waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
 	ctx.Data["WaitCount"] = waitCount
 	log.Info("The NPU WaitCount is " + fmt.Sprint(waitCount))
+	NotStopTaskCount, _ := models.GetModelSafetyCountByUserID(ctx.User.ID)
+	ctx.Data["NotStopTaskCount"] = NotStopTaskCount
 	ctx.HTML(200, tplModelSafetyTestCreateNpu)
 }
 
@@ -980,6 +957,8 @@ func modelSafetyNewDataPrepare(ctx *context.Context) error {
 	ctx.Data["ckpt_name"] = ctx.Query("ckpt_name")
 	ctx.Data["model_name"] = ctx.Query("model_name")
 	ctx.Data["model_version"] = ctx.Query("model_version")
+	NotStopTaskCount, _ := models.GetModelSafetyCountByUserID(ctx.User.ID)
+	ctx.Data["NotStopTaskCount"] = NotStopTaskCount
 	if ctx.QueryInt("type") == models.TypeCloudBrainOne {
 		ctx.Data["type"] = models.TypeCloudBrainOne
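
Note: the aisafety.go hunks above stamp EndTime when a task leaves the running state and derive Duration from the start and end timestamps. Those timestamps are Unix seconds, so their difference is already a second count and needs no millisecond conversion. A standalone sketch, assuming plain time.Time values rather than the repository's timeutil.TimeStamp:

package main

import (
	"fmt"
	"time"
)

// durationSeconds returns the elapsed seconds between two instants;
// time.Time.Unix() is already in seconds, so dividing by 1000 here
// would wrongly truncate most short jobs to zero.
func durationSeconds(start, end time.Time) int64 {
	return end.Unix() - start.Unix()
}

func main() {
	start := time.Now()
	end := start.Add(90 * time.Second)
	fmt.Println(durationSeconds(start, end)) // 90
}
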
diff --git a/routers/repo/cloudbrain.go b/routers/repo/cloudbrain.go
index a2ea7d51b..de0e0e8bf 100755
--- a/routers/repo/cloudbrain.go
+++ b/routers/repo/cloudbrain.go
@@ -2,6 +2,7 @@ package repo
 
 import (
 	"bufio"
+	"code.gitea.io/gitea/modules/urfs_client/urchin"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -17,6 +18,7 @@ import (
 
 	"code.gitea.io/gitea/modules/dataset"
 
+	"code.gitea.io/gitea/services/cloudbrain/cloudbrainTask"
 	"code.gitea.io/gitea/services/cloudbrain/resource"
 	"code.gitea.io/gitea/services/reward/point/account"
 
@@ -107,7 +109,7 @@ func jobNamePrefixValid(s string) string {
 
 }
 
-func cloudBrainNewDataPrepare(ctx *context.Context) error {
+func cloudBrainNewDataPrepare(ctx *context.Context, jobType string) error {
 	ctx.Data["PageIsCloudBrain"] = true
 	t := time.Now()
 	var displayJobName = jobNamePrefixValid(cutString(ctx.User.Name, 5)) + t.Format("2006010215") + strconv.Itoa(int(t.Unix()))[5:]
@@ -148,6 +150,8 @@ func cloudBrainNewDataPrepare(ctx *context.Context) error {
 		defaultMode = "alogrithm"
 	}
 	ctx.Data["benchmarkMode"] = defaultMode
+	NotStopTaskCount, _ := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainOne, jobType)
+	ctx.Data["NotStopTaskCount"] = NotStopTaskCount
 
 	if ctx.Cloudbrain != nil {
 		ctx.Data["branch_name"] = ctx.Cloudbrain.BranchName
@@ -210,7 +214,7 @@ func prepareCloudbrainOneSpecs(ctx *context.Context) {
 }
 
 func CloudBrainNew(ctx *context.Context) {
-	err := cloudBrainNewDataPrepare(ctx)
+	err := cloudBrainNewDataPrepare(ctx, string(models.JobTypeDebug))
 	if err != nil {
 		ctx.ServerError("get new cloudbrain info failed", err)
 		return
@@ -244,7 +248,7 @@ func cloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
 	isOk, err := lock.Lock(models.CloudbrainKeyDuration)
 	if !isOk {
 		log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tpl, &form)
 		return
 	}
@@ -254,42 +258,42 @@
 	if err == nil {
 		if len(tasks) != 0 {
 			log.Error("the job name did already exist", ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr("the job name did already exist", tpl, &form)
 			return
 		}
 	} else {
 		if !models.IsErrJobNotExist(err) {
 			log.Error("system error, %v", err, ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr("system error", tpl, &form)
 			return
 		}
 	}
 
 	if !jobNamePattern.MatchString(displayJobName) {
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_jobname_err"), tpl, &form)
 		return
 	}
 
 	if jobType != string(models.JobTypeBenchmark) && jobType != string(models.JobTypeDebug) && jobType != string(models.JobTypeTrain) {
 		log.Error("jobtype error:", jobType, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr("jobtype error", tpl, &form)
 		return
 	}
 
-	count, err := models.GetCloudbrainCountByUserID(ctx.User.ID, jobType)
+	count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainOne, jobType)
 	if err != nil {
 		log.Error("GetCloudbrainCountByUserID failed:%v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr("system error", tpl, &form)
 		return
 	} else {
 		if count >= 1 {
 			log.Error("the user already has running or waiting task", ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr(ctx.Tr("repo.cloudbrain.morethanonejob"), tpl, &form)
 			return
 		}
@@ -301,7 +305,7 @@
 		datasetInfos, datasetNames, err = models.GetDatasetInfo(uuids)
 		if err != nil {
 			log.Error("GetDatasetInfo failed: %v", err, ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr(ctx.Tr("cloudbrain.error.dataset_select"), tpl, &form)
 			return
 		}
@@ -312,7 +316,7 @@
 	bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
 	if err != nil || !bootFileExist {
 		log.Error("Get bootfile error:", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tpl, &form)
 		return
 	}
@@ -320,7 +324,7 @@
 	commandTrain, err := getTrainJobCommand(form)
 	if err != nil {
 		log.Error("getTrainJobCommand failed: %v", err)
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(err.Error(), tpl, &form)
 		return
 	}
@@ -333,7 +337,7 @@
 	}
 	errStr := loadCodeAndMakeModelPath(repo, codePath, branchName,
jobName, cloudbrain.ModelMountPath) if errStr != "" { - cloudBrainNewDataPrepare(ctx) + cloudBrainNewDataPrepare(ctx, jobType) ctx.RenderWithErr(ctx.Tr(errStr), tpl, &form) return } @@ -346,14 +350,14 @@ func cloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { Cluster: models.OpenICluster, AiCenterCode: models.AICenterOfCloudBrainOne}) if err != nil || spec == nil { - cloudBrainNewDataPrepare(ctx) + cloudBrainNewDataPrepare(ctx, jobType) ctx.RenderWithErr("Resource specification not available", tpl, &form) return } if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID) - cloudBrainNewDataPrepare(ctx) + cloudBrainNewDataPrepare(ctx, jobType) ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tpl, &form) return } @@ -396,7 +400,7 @@ func cloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { err = cloudbrain.GenerateTask(req) if err != nil { - cloudBrainNewDataPrepare(ctx) + cloudBrainNewDataPrepare(ctx, jobType) ctx.RenderWithErr(err.Error(), tpl, &form) return } @@ -454,7 +458,7 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra isOk, err := lock.Lock(models.CloudbrainKeyDuration) if !isOk { log.Error("lock processed failed:%v", err, ctx.Data["MsgID"]) - cloudBrainNewDataPrepare(ctx) + cloudBrainNewDataPrepare(ctx, jobType) ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tpl, &form) return } @@ -465,7 +469,7 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra command, err := getInferenceJobCommand(form) if err != nil { log.Error("getTrainJobCommand failed: %v", err) - cloudBrainNewDataPrepare(ctx) + cloudBrainNewDataPrepare(ctx, jobType) ctx.RenderWithErr(err.Error(), tpl, &form) return } @@ -474,21 +478,21 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra if err == nil { if len(tasks) != 0 { log.Error("the job name did already exist", ctx.Data["MsgID"]) - cloudBrainNewDataPrepare(ctx) + cloudBrainNewDataPrepare(ctx, jobType) ctx.RenderWithErr("the job name did already exist", tpl, &form) return } } else { if !models.IsErrJobNotExist(err) { log.Error("system error, %v", err, ctx.Data["MsgID"]) - cloudBrainNewDataPrepare(ctx) + cloudBrainNewDataPrepare(ctx, jobType) ctx.RenderWithErr("system error", tpl, &form) return } } if !jobNamePattern.MatchString(displayJobName) { - cloudBrainNewDataPrepare(ctx) + cloudBrainNewDataPrepare(ctx, jobType) ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_jobname_err"), tpl, &form) return } @@ -496,21 +500,21 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName) if err != nil || !bootFileExist { log.Error("Get bootfile error:", err, ctx.Data["MsgID"]) - cloudBrainNewDataPrepare(ctx) + cloudBrainNewDataPrepare(ctx, jobType) ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tpl, &form) return } - count, err := models.GetCloudbrainCountByUserID(ctx.User.ID, jobType) + count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainOne, jobType) if err != nil { log.Error("GetCloudbrainCountByUserID failed:%v", err, ctx.Data["MsgID"]) - cloudBrainNewDataPrepare(ctx) + cloudBrainNewDataPrepare(ctx, jobType) ctx.RenderWithErr("system error", tpl, &form) return } else { if count >= 1 { log.Error("the user already has running or waiting task", ctx.Data["MsgID"]) - cloudBrainNewDataPrepare(ctx) + 
cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr(ctx.Tr("repo.cloudbrain.morethanonejob"), tpl, &form)
 			return
 		}
@@ -521,7 +525,7 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra
 	}
 	errStr := loadCodeAndMakeModelPath(repo, codePath, branchName, jobName, cloudbrain.ResultPath)
 	if errStr != "" {
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr(errStr), tpl, &form)
 		return
 	}
@@ -531,7 +535,7 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra
 	datasetInfos, datasetNames, err := models.GetDatasetInfo(uuid)
 	if err != nil {
 		log.Error("GetDatasetInfo failed: %v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("cloudbrain.error.dataset_select"), tpl, &form)
 		return
 	}
@@ -541,13 +545,13 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra
 		Cluster: models.OpenICluster, AiCenterCode: models.AICenterOfCloudBrainOne})
 	if err != nil || spec == nil {
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr("Resource specification not available", tpl, &form)
 		return
 	}
 	if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
 		log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tpl, &form)
 		return
 	}
@@ -582,7 +586,7 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra
 	err = cloudbrain.GenerateTask(req)
 	if err != nil {
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(err.Error(), tpl, &form)
 		return
 	}
@@ -682,7 +686,7 @@ func CloudBrainRestart(ctx *context.Context) {
 			break
 		}
-		count, err := models.GetCloudbrainCountByUserID(ctx.User.ID, string(models.JobTypeDebug))
+		count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainOne, string(models.JobTypeDebug))
 		if err != nil {
 			log.Error("GetCloudbrainCountByUserID failed:%v", err, ctx.Data["MsgID"])
 			resultCode = "-1"
@@ -749,47 +753,48 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo
 		ctx.NotFound(ctx.Req.URL.RequestURI(), nil)
 		return
 	}
-
-	result, err := cloudbrain.GetJob(task.JobID)
-	if err != nil {
-		log.Info("error:" + err.Error())
-		ctx.NotFound(ctx.Req.URL.RequestURI(), nil)
-		return
-	}
 	prepareSpec4Show(ctx, task)
 	if ctx.Written() {
 		return
 	}
+	if task.Status == string(models.JobWaiting) || task.Status == string(models.JobRunning) {
+		result, err := cloudbrain.GetJob(task.JobID)
+		if err != nil {
+			log.Info("error:" + err.Error())
+			ctx.NotFound(ctx.Req.URL.RequestURI(), nil)
+			return
+		}
+
+		if result != nil {
+			jobRes, _ := models.ConvertToJobResultPayload(result.Payload)
+			taskRoles := jobRes.TaskRoles
+			taskRes, _ := models.ConvertToTaskPod(taskRoles[cloudbrain.SubTaskName].(map[string]interface{}))
+			ctx.Data["taskRes"] = taskRes
+			ctx.Data["ExitDiagnostics"] = taskRes.TaskStatuses[0].ExitDiagnostics
+			oldStatus := task.Status
+			task.Status = taskRes.TaskStatuses[0].State
+			task.ContainerIp = ""
+			task.ContainerID = taskRes.TaskStatuses[0].ContainerID
+			models.ParseAndSetDurationFromCloudBrainOne(jobRes, task)
+
+			if task.DeletedAt.IsZero() { //normal record
+				if oldStatus != task.Status {
+					notification.NotifyChangeCloudbrainStatus(task, oldStatus)
+				}
+				err = models.UpdateJob(task)
+				if err != nil {
+					ctx.Data["error"] = err.Error()
+					return
+				}
+			} else { //deleted record
-	if result != nil {
-		jobRes, _ := models.ConvertToJobResultPayload(result.Payload)
-		taskRoles := jobRes.TaskRoles
-		taskRes, _ := models.ConvertToTaskPod(taskRoles[cloudbrain.SubTaskName].(map[string]interface{}))
-		ctx.Data["taskRes"] = taskRes
-		ctx.Data["ExitDiagnostics"] = taskRes.TaskStatuses[0].ExitDiagnostics
-		oldStatus := task.Status
-		task.Status = taskRes.TaskStatuses[0].State
-		task.ContainerIp = ""
-		task.ContainerID = taskRes.TaskStatuses[0].ContainerID
-		models.ParseAndSetDurationFromCloudBrainOne(jobRes, task)
-
-		if task.DeletedAt.IsZero() { //normal record
-			if oldStatus != task.Status {
-				notification.NotifyChangeCloudbrainStatus(task, oldStatus)
-			}
-			err = models.UpdateJob(task)
-			if err != nil {
-				ctx.Data["error"] = err.Error()
-				return
 			}
-		} else { //deleted record
+			ctx.Data["result"] = jobRes
+		} else {
+			log.Info("error:" + err.Error())
+			return
 		}
-
-		ctx.Data["result"] = jobRes
-	} else {
-		log.Info("error:" + err.Error())
-		return
 	}
-	}
 
 	user, err := models.GetUserByID(task.UserID)
@@ -1939,6 +1944,11 @@ func SyncCloudbrainStatus() {
 				task.CorrectCreateUnix()
 				if oldStatus != task.Status {
 					notification.NotifyChangeCloudbrainStatus(task, oldStatus)
+					if models.IsTrainJobTerminal(task.Status) && task.ComputeResource == models.NPUResource {
+						if len(result.JobInfo.Tasks[0].CenterID) == 1 {
+							urchin.GetBackNpuModel(task.ID, grampus.GetRemoteEndPoint(result.JobInfo.Tasks[0].CenterID[0]), grampus.BucketRemote, grampus.GetNpuModelObjectKey(task.JobName), grampus.GetCenterProxy(setting.Grampus.LocalCenterID))
+						}
+					}
 				}
 				err = models.UpdateJob(task)
 				if err != nil {
@@ -2222,7 +2232,7 @@ func CloudBrainBenchmarkNew(ctx *context.Context) {
 	ctx.Data["description"] = ""
 	ctx.Data["benchmarkTypeID"] = -1
 	ctx.Data["benchmark_child_types_id_hidden"] = -1
-	err := cloudBrainNewDataPrepare(ctx)
+	err := cloudBrainNewDataPrepare(ctx, string(models.JobTypeBenchmark))
 	if err != nil {
 		ctx.ServerError("get new cloudbrain info failed", err)
 		return
@@ -2327,6 +2337,7 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo
 	benchmarkTypeID := form.BenchmarkTypeID
 	benchmarkChildTypeID := form.BenchmarkChildTypeID
 	repo := ctx.Repo.Repository
+	jobType := form.JobType
 
 	ctx.Data["description"] = form.Description
 	ctx.Data["benchmarkTypeID"] = benchmarkTypeID
@@ -2336,31 +2347,31 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo
 	isOk, err := lock.Lock(models.CloudbrainKeyDuration)
 	if !isOk {
 		log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tplCloudBrainBenchmarkNew, &form)
 		return
 	}
 	defer lock.UnLock()
 
-	tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, string(models.JobTypeBenchmark), displayJobName)
+	tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, jobType, displayJobName)
 	if err == nil {
 		if len(tasks) != 0 {
 			log.Error("the job name did already exist", ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr("the job name did already exist", tplCloudBrainBenchmarkNew, &form)
 			return
 		}
 	} else {
 		if !models.IsErrJobNotExist(err) {
 			log.Error("system error, %v", err, ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr("system error", tplCloudBrainBenchmarkNew, &form)
 			return
 		}
 	}
 
 	if !jobNamePattern.MatchString(jobName) {
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_jobname_err"), tplCloudBrainBenchmarkNew, &form)
 		return
 	}
@@ -2368,7 +2379,7 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo
 	childInfo, err := getBenchmarkAttachment(benchmarkTypeID, benchmarkChildTypeID, ctx)
 	if err != nil {
 		log.Error("getBenchmarkAttachment failed:%v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr("benchmark type error", tplCloudBrainBenchmarkNew, &form)
 		return
 	}
@@ -2379,27 +2390,27 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo
 		Cluster: models.OpenICluster, AiCenterCode: models.AICenterOfCloudBrainOne})
 	if err != nil || spec == nil {
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr("Resource specification not available", tplCloudBrainBenchmarkNew, &form)
 		return
 	}
 	if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
 		log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplCloudBrainBenchmarkNew, &form)
 		return
 	}
 
-	count, err := models.GetBenchmarkCountByUserID(ctx.User.ID)
+	count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainOne, jobType)
 	if err != nil {
 		log.Error("GetCloudbrainCountByUserID failed:%v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr("system error", tplCloudBrainBenchmarkNew, &form)
 		return
 	} else {
 		if count >= 1 {
 			log.Error("the user already has running or waiting task", ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr(ctx.Tr("repo.cloudbrain.morethanonejob"), tplCloudBrainBenchmarkNew, &form)
 			return
 		}
@@ -2408,7 +2419,7 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo
 	os.RemoveAll(codePath)
 	if err := downloadCode(repo, codePath, cloudbrain.DefaultBranchName); err != nil {
 		log.Error("downloadCode failed, %v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr("system error", tplCloudBrainBenchmarkNew, &form)
 		return
 	}
@@ -2417,11 +2428,11 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo
 		if os.IsNotExist(err) {
 			// file does not exist
 			log.Error("train.py does not exist, %v", err, ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr("train.py does not exist", tplCloudBrainBenchmarkNew, &form)
 		} else {
 			log.Error("Stat failed, %v", err, ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr("system error", tplCloudBrainBenchmarkNew, &form)
 		}
 		return
@@ -2429,11 +2440,11 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo
 		if os.IsNotExist(err) {
 			// file does not exist
 			log.Error("test.py does not exist, %v", err, ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr("test.py does not exist", tplCloudBrainBenchmarkNew, &form)
 		} else {
 			log.Error("Stat failed, %v", err, ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr("system error", tplCloudBrainBenchmarkNew, &form)
 		}
 		return
@@ -2441,7 +2452,7 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo
 
 	if err := uploadCodeToMinio(codePath+"/", jobName, cloudbrain.CodeMountPath+"/"); err != nil {
 		log.Error("uploadCodeToMinio failed, %v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr("system error", tplCloudBrainBenchmarkNew, &form)
 		return
 	}
@@ -2466,7 +2477,7 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo
 	datasetInfos, datasetNames, err := models.GetDatasetInfo(uuid)
 	if err != nil {
 		log.Error("GetDatasetInfo failed: %v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("cloudbrain.error.dataset_select"), tplCloudBrainBenchmarkNew, &form)
 		return
 	}
@@ -2500,7 +2511,7 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo
 
 	err = cloudbrain.GenerateTask(req)
 	if err != nil {
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(err.Error(), tplCloudBrainBenchmarkNew, &form)
 		return
 	}
@@ -2526,7 +2537,7 @@ func ModelBenchmarkCreate(ctx *context.Context, form auth.CreateCloudBrainForm)
 	isOk, err := lock.Lock(models.CloudbrainKeyDuration)
 	if !isOk {
 		log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tpl, &form)
 		return
 	}
@@ -2536,42 +2547,42 @@ func ModelBenchmarkCreate(ctx *context.Context, form auth.CreateCloudBrainForm)
 	if err == nil {
 		if len(tasks) != 0 {
 			log.Error("the job name did already exist", ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr("the job name did already exist", tpl, &form)
 			return
 		}
 	} else {
 		if !models.IsErrJobNotExist(err) {
 			log.Error("system error, %v", err, ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr("system error", tpl, &form)
 			return
 		}
 	}
 
 	if !jobNamePattern.MatchString(displayJobName) {
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_jobname_err"), tpl, &form)
 		return
 	}
 
 	if jobType != string(models.JobTypeSnn4imagenet) && jobType != string(models.JobTypeBrainScore) {
 		log.Error("jobtype error:", jobType, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr("jobtype error", tpl, &form)
 		return
 	}
 
-	count, err := models.GetBenchmarkCountByUserID(ctx.User.ID)
+	count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainOne, jobType)
 	if err != nil {
 		log.Error("GetCloudbrainCountByUserID failed:%v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr("system error", tpl, &form)
 		return
 	} else {
 		if count >= 1 {
 			log.Error("the user already has running or waiting task", ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
+			cloudBrainNewDataPrepare(ctx, jobType)
 			ctx.RenderWithErr(ctx.Tr("repo.cloudbrain.morethanonejob"), tpl, &form)
 			return
 		}
@@ -2603,7 +2614,7 @@ func ModelBenchmarkCreate(ctx *context.Context, form auth.CreateCloudBrainForm)
 	datasetInfos, datasetNames, err := models.GetDatasetInfo(uuid)
 	if err != nil {
 		log.Error("GetDatasetInfo failed: %v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("cloudbrain.error.dataset_select"), tpl, &form)
 		return
 	}
@@ -2613,14 +2624,14 @@ func ModelBenchmarkCreate(ctx *context.Context, form auth.CreateCloudBrainForm)
 		Cluster: models.OpenICluster, AiCenterCode: models.AICenterOfCloudBrainOne})
 	if err != nil || spec == nil {
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr("Resource specification not available", tpl, &form)
 		return
 	}
 	if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
 		log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tpl, &form)
 		return
 	}
@@ -2654,7 +2665,7 @@ func ModelBenchmarkCreate(ctx *context.Context, form auth.CreateCloudBrainForm)
 
 	err = cloudbrain.GenerateTask(req)
 	if err != nil {
-		cloudBrainNewDataPrepare(ctx)
+		cloudBrainNewDataPrepare(ctx, jobType)
 		ctx.RenderWithErr(err.Error(), tpl, &form)
 		return
 	}
@@ -2701,7 +2712,7 @@ func CloudBrainTrainJobVersionNew(ctx *context.Context) {
 }
 
 func cloudBrainTrainJobCreate(ctx *context.Context) {
-	err := cloudBrainNewDataPrepare(ctx)
+	err := cloudBrainNewDataPrepare(ctx, string(models.JobTypeTrain))
 	if err != nil {
 		ctx.ServerError("get new train-job info failed", err)
 		return
@@ -2710,7 +2721,7 @@ func cloudBrainTrainJobCreate(ctx *context.Context) {
 }
 
 func InferenceCloudBrainJobNew(ctx *context.Context) {
-	err := cloudBrainNewDataPrepare(ctx)
+	err := cloudBrainNewDataPrepare(ctx, string(models.JobTypeInference))
 	if err != nil {
 		ctx.ServerError("get new train-job info failed", err)
 		return
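The cloudbrain.go hunks above follow one pattern: every error path that re-renders a creation form now calls cloudBrainNewDataPrepare(ctx, jobType) so the page is rebuilt for the job type actually being created, and the per-user "one unfinished task" checks go through cloudbrainTask.GetNotFinalStatusTaskCount instead of per-job-type count queries. A minimal sketch of how that repeated pair could be factored; the helper name is hypothetical and not part of the patch:

    // Hypothetical helper, not in the patch: the diff repeats this
    // prepare-then-render pair on every error path of the create handlers.
    func renderCreateError(ctx *context.Context, jobType, msg string, tpl base.TplName, form interface{}) {
        cloudBrainNewDataPrepare(ctx, jobType) // rebuild page data for the same job type
        ctx.RenderWithErr(msg, tpl, form)
    }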
diff --git a/routers/repo/cloudbrain_statistic.go b/routers/repo/cloudbrain_statistic.go
new file mode 100644
index 000000000..3814c2daf
--- /dev/null
+++ b/routers/repo/cloudbrain_statistic.go
@@ -0,0 +1,169 @@
+package repo
+
+import (
+	"strings"
+	"time"
+
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/timeutil"
+)
+
+func CloudbrainDurationStatisticHour() {
+
+	dateTime := time.Now().Format("2006-01-02 15:04:05")
+	dayTime := time.Now().Format("2006-01-02")
+	now := time.Now()
+
+	currentTime := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
+
+	m, _ := time.ParseDuration("-1h")
+	beginTime := currentTime.Add(m).Unix()
+	endTime := currentTime.Unix()
+	hourTime := currentTime.Add(m).Hour()
+
+	ciTasks, err := models.GetCloudbrainByTime(beginTime, endTime)
+	if err != nil {
+		log.Info("GetCloudbrainByTime err: %v", err)
+		return
+	}
+	specMap := make(map[string]*models.Specification)
+	models.LoadSpecs4CloudbrainInfo(ciTasks)
+	for _, cloudbrain := range ciTasks {
+		if _, ok := specMap[cloudbrain.Cloudbrain.Spec.AiCenterCode+"/"+cloudbrain.Cloudbrain.Spec.AccCardType]; !ok {
+			if cloudbrain.Cloudbrain.Spec != nil {
+				specMap[cloudbrain.Cloudbrain.Spec.AiCenterCode+"/"+cloudbrain.Cloudbrain.Spec.AccCardType] = cloudbrain.Cloudbrain.Spec
+			}
+		}
+	}
+
+	cloudBrainCenterCodeAndCardTypeInfo := getcloudBrainCenterCodeAndCardTypeInfo(ciTasks, beginTime, endTime)
+
+	resourceQueues, err := models.GetCanUseCardInfo()
+	if err != nil {
+		log.Info("GetCanUseCardInfo err: %v", err)
+		return
+	}
+	cardsTotalDurationMap := make(map[string]int)
+	for _, resourceQueue := range resourceQueues {
+		cardsTotalDurationMap[resourceQueue.Cluster+"/"+resourceQueue.AiCenterName+"/"+resourceQueue.AiCenterCode+"/"+resourceQueue.AccCardType+"/"+resourceQueue.ComputeResource] = resourceQueue.CardsTotalNum * 1 * 60 * 60
+	}
+
+	for centerCode, CardTypeInfo := range cloudBrainCenterCodeAndCardTypeInfo {
+		for cardType, cardDuration := range CardTypeInfo {
+			spec := specMap[centerCode+"/"+cardType]
+			if spec != nil {
+				if err := models.DeleteCloudbrainDurationStatisticHour(dayTime, hourTime, centerCode, cardType); err != nil {
+					log.Error("DeleteCloudbrainDurationStatisticHour failed: %v", err.Error())
+					return
+				}
+				if _, ok := cardsTotalDurationMap[spec.Cluster+"/"+spec.AiCenterName+"/"+centerCode+"/"+cardType+"/"+spec.ComputeResource]; !ok {
+					cardsTotalDurationMap[spec.Cluster+"/"+spec.AiCenterName+"/"+centerCode+"/"+cardType+"/"+spec.ComputeResource] = 0
+				}
+				cloudbrainDurationStat := models.CloudbrainDurationStatistic{
+					DateTime:           dateTime,
+					DayTime:            dayTime,
+					HourTime:           hourTime,
+					Cluster:            spec.Cluster,
+					AiCenterName:       spec.AiCenterName,
+					AiCenterCode:       centerCode,
+					AccCardType:        cardType,
+					ComputeResource:    spec.ComputeResource,
+					CardsUseDuration:   cardDuration,
+					CardsTotalDuration: cardsTotalDurationMap[spec.Cluster+"/"+spec.AiCenterName+"/"+centerCode+"/"+cardType+"/"+spec.ComputeResource],
+					CreatedUnix:        timeutil.TimeStampNow(),
+				}
+				if _, err = models.InsertCloudbrainDurationStatistic(&cloudbrainDurationStat); err != nil {
+					log.Error("Insert cloudbrainDurationStat failed: %v", err.Error())
+				}
+				delete(cardsTotalDurationMap, spec.Cluster+"/"+spec.AiCenterName+"/"+centerCode+"/"+cardType+"/"+spec.ComputeResource)
+			}
+		}
+	}
+
+	for key, cardsTotalDuration := range cardsTotalDurationMap {
+		if err := models.DeleteCloudbrainDurationStatisticHour(dayTime, hourTime, strings.Split(key, "/")[2], strings.Split(key, "/")[3]); err != nil {
+			log.Error("DeleteCloudbrainDurationStatisticHour failed: %v", err.Error())
+			return
+		}
+		cloudbrainDurationStat := models.CloudbrainDurationStatistic{
+			DateTime:           dateTime,
+			DayTime:            dayTime,
+			HourTime:           hourTime,
+			Cluster:            strings.Split(key, "/")[0],
+			AiCenterName:       strings.Split(key, "/")[1],
+			AiCenterCode:       strings.Split(key, "/")[2],
+			AccCardType:        strings.Split(key, "/")[3],
+			ComputeResource:    strings.Split(key, "/")[4],
+			CardsUseDuration:   0,
+			CardsTotalDuration: cardsTotalDuration,
+			CreatedUnix:        timeutil.TimeStampNow(),
+		}
+		if _, err = models.InsertCloudbrainDurationStatistic(&cloudbrainDurationStat); err != nil {
+			log.Error("Insert cloudbrainDurationStat failed: %v", err.Error())
+		}
+	}
+
+	log.Info("finish summary cloudbrainDurationStat")
+}
+
+func getcloudBrainCenterCodeAndCardTypeInfo(ciTasks []*models.CloudbrainInfo, beginTime int64, endTime int64) map[string]map[string]int {
+	var WorkServerNumber int
+	var AccCardsNum int
+	cloudBrainCenterCodeAndCardType := make(map[string]map[string]int)
+	for _, cloudbrain := range ciTasks {
+
+		if cloudbrain.Cloudbrain.StartTime == 0 {
+			cloudbrain.Cloudbrain.StartTime = cloudbrain.Cloudbrain.CreatedUnix
+		}
+		if cloudbrain.Cloudbrain.EndTime == 0 {
+			cloudbrain.Cloudbrain.EndTime = cloudbrain.Cloudbrain.UpdatedUnix
+		}
+		if cloudbrain.Cloudbrain.WorkServerNumber >= 1 {
+			WorkServerNumber = cloudbrain.Cloudbrain.WorkServerNumber
+		} else {
+			WorkServerNumber = 1
+		}
+		if cloudbrain.Cloudbrain.Spec == nil {
+			AccCardsNum = 1
+		} else {
+			AccCardsNum = cloudbrain.Cloudbrain.Spec.AccCardsNum
+		}
+		if _, ok := cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode]; !ok {
+			cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode] = make(map[string]int)
+		}
+
+		if cloudbrain.Cloudbrain.Status == string(models.ModelArtsRunning) {
+			if _, ok := cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType]; !ok {
+				if int64(cloudbrain.Cloudbrain.StartTime) < beginTime {
+					cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(endTime) - int(beginTime))
+				} else {
+					cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(endTime) - int(cloudbrain.Cloudbrain.StartTime))
+				}
+			} else {
+				if int64(cloudbrain.Cloudbrain.StartTime) < beginTime {
+					cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(endTime) - int(beginTime))
+				} else {
+					cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(endTime) - int(cloudbrain.Cloudbrain.StartTime))
+				}
+			}
+		} else {
+			if _, ok := cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType]; !ok {
+				if int64(cloudbrain.Cloudbrain.StartTime) < beginTime {
+					cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(cloudbrain.Cloudbrain.EndTime) - int(beginTime))
+				} else {
+					cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(cloudbrain.Cloudbrain.EndTime) - int(cloudbrain.Cloudbrain.StartTime))
+				}
+			} else {
+				if int64(cloudbrain.Cloudbrain.StartTime) < beginTime {
+					cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(cloudbrain.Cloudbrain.EndTime) - int(beginTime))
+				} else {
+					cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(cloudbrain.Cloudbrain.EndTime) - int(cloudbrain.Cloudbrain.StartTime))
+				}
+			}
+
+		}
+	}
+
+	return cloudBrainCenterCodeAndCardType
+}
diff --git a/routers/repo/dataset.go b/routers/repo/dataset.go
index f0e41024b..16e21e43b 100755
--- a/routers/repo/dataset.go
+++ b/routers/repo/dataset.go
@@ -523,6 +523,7 @@ func ReferenceDatasetAvailable(ctx *context.Context) {
 		PublicOnly:     true,
 		NeedAttachment: false,
 		CloudBrainType: models.TypeCloudBrainAll,
+		SearchOrderBy:  models.SearchOrderByDefault,
 	}
 	dataset, _ := models.GetDatasetByRepo(&models.Repository{ID: ctx.Repo.Repository.ID})
 	if dataset != nil {
@@ -538,6 +539,7 @@ func PublicDatasetMultiple(ctx *context.Context) {
 		PublicOnly:     true,
 		NeedAttachment: true,
 		CloudBrainType: ctx.QueryInt("type"),
+		SearchOrderBy:  models.SearchOrderByDefault,
 	}
 
 	datasetMultiple(ctx, opts)
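The new statistics job charges each task AccCardsNum × WorkServerNumber × the seconds of its lifetime that overlap the previous clock hour, and compares that with capacity (CardsTotalNum × 3600) per AI center and card type; the four if/else branches above distinguish tasks that started before the window and tasks that are still running. A standalone sketch of the same arithmetic using clamping, under the assumption that all times are Unix seconds:

    package main

    import "fmt"

    // cardSeconds mirrors the overlap arithmetic in
    // getcloudBrainCenterCodeAndCardTypeInfo: cards per node × node count ×
    // seconds of [start, stop) falling inside the window [begin, end).
    // Clamping replaces the patch's explicit branches.
    func cardSeconds(cards, nodes int, start, stop, begin, end int64) int64 {
        if start < begin {
            start = begin
        }
        if stop > end || stop == 0 { // still-running tasks are charged up to the window end
            stop = end
        }
        if stop <= start {
            return 0
        }
        return int64(cards*nodes) * (stop - start)
    }

    func main() {
        // a 4-card single node that ran 09:30-10:20, scored for the 09:00-10:00 window
        fmt.Println(cardSeconds(4, 1, 34200, 37200, 32400, 36000)) // 4 * 1800 = 7200
    }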
"code.gitea.io/gitea/modules/dataset" "code.gitea.io/gitea/services/cloudbrain/resource" @@ -35,6 +38,7 @@ import ( "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/setting" + cloudbrainService "code.gitea.io/gitea/services/cloudbrain" ) const ( @@ -135,10 +139,15 @@ func grampusTrainJobNewDataPrepare(ctx *context.Context, processType string) err ctx.Data["datasetType"] = models.TypeCloudBrainOne waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeC2Net, models.GPUResource, models.JobTypeTrain) ctx.Data["WaitCount"] = waitCount + NotStopTaskCount, _ := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeC2Net, string(models.JobTypeTrain), models.GPUResource) + ctx.Data["NotStopTaskCount"] = NotStopTaskCount + } else if processType == grampus.ProcessorTypeNPU { ctx.Data["datasetType"] = models.TypeCloudBrainTwo waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeC2Net, models.NPUResource, models.JobTypeTrain) ctx.Data["WaitCount"] = waitCount + NotStopTaskCount, _ := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeC2Net, string(models.JobTypeTrain), models.NPUResource) + ctx.Data["NotStopTaskCount"] = NotStopTaskCount } if ctx.Cloudbrain != nil { @@ -300,7 +309,7 @@ func grampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain } //check count limit - count, err := models.GetGrampusCountByUserID(ctx.User.ID, string(models.JobTypeTrain), models.GPUResource) + count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeC2Net, string(models.JobTypeTrain), models.GPUResource) if err != nil { log.Error("GetGrampusCountByUserID failed:%v", err, ctx.Data["MsgID"]) grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU) @@ -423,7 +432,7 @@ func grampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain //prepare command preTrainModelPath := getPreTrainModelPath(form.PreTrainModelUrl, form.CkptName) - command, err := generateCommand(repo.Name, grampus.ProcessorTypeGPU, codeMinioPath+cloudbrain.DefaultBranchName+".zip", datasetRemotePath, bootFile, params, setting.CBCodePathPrefix+jobName+cloudbrain.ModelMountPath+"/", allFileName, preTrainModelPath, form.CkptName) + command, err := generateCommand(repo.Name, grampus.ProcessorTypeGPU, codeMinioPath+cloudbrain.DefaultBranchName+".zip", datasetRemotePath, bootFile, params, setting.CBCodePathPrefix+jobName+cloudbrain.ModelMountPath+"/", allFileName, preTrainModelPath, form.CkptName, "") if err != nil { log.Error("Failed to generateCommand: %s (%v)", displayJobName, err, ctx.Data["MsgID"]) grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU) @@ -570,7 +579,7 @@ func grampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain } //check count limit - count, err := models.GetGrampusCountByUserID(ctx.User.ID, string(models.JobTypeTrain), models.NPUResource) + count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeC2Net, string(models.JobTypeTrain), models.NPUResource) if err != nil { log.Error("GetGrampusCountByUserID failed:%v", err, ctx.Data["MsgID"]) grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU) @@ -680,7 +689,7 @@ func grampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain //prepare command preTrainModelPath := getPreTrainModelPath(form.PreTrainModelUrl, form.CkptName) - command, err := generateCommand(repo.Name, grampus.ProcessorTypeNPU, codeObsPath+cloudbrain.DefaultBranchName+".zip", datasetRemotePath, 
bootFile, params, setting.CodePathPrefix+jobName+modelarts.OutputPath, allFileName, preTrainModelPath, form.CkptName) + command, err := generateCommand(repo.Name, grampus.ProcessorTypeNPU, codeObsPath+cloudbrain.DefaultBranchName+".zip", datasetRemotePath, bootFile, params, setting.CodePathPrefix+jobName+modelarts.OutputPath, allFileName, preTrainModelPath, form.CkptName, grampus.GetNpuModelRemoteObsUrl(jobName)) if err != nil { log.Error("Failed to generateCommand: %s (%v)", displayJobName, err, ctx.Data["MsgID"]) grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU) @@ -854,7 +863,7 @@ func GrampusTrainJobShow(ctx *context.Context) { } oldStatus := task.Status task.Status = grampus.TransTrainJobStatus(result.JobInfo.Status) - if task.Status != result.JobInfo.Status || result.JobInfo.Status == models.GrampusStatusRunning { + if task.Status != oldStatus || task.Status == models.GrampusStatusRunning { task.Duration = result.JobInfo.RunSec if task.Duration < 0 { task.Duration = 0 @@ -870,6 +879,11 @@ func GrampusTrainJobShow(ctx *context.Context) { task.CorrectCreateUnix() if oldStatus != task.Status { notification.NotifyChangeCloudbrainStatus(task, oldStatus) + if models.IsTrainJobTerminal(task.Status) && task.ComputeResource == models.NPUResource { + if len(result.JobInfo.Tasks[0].CenterID) == 1 { + urchin.GetBackNpuModel(task.ID, grampus.GetRemoteEndPoint(result.JobInfo.Tasks[0].CenterID[0]), grampus.BucketRemote, grampus.GetNpuModelObjectKey(task.JobName), grampus.GetCenterProxy(setting.Grampus.LocalCenterID)) + } + } } err = models.UpdateJob(task) if err != nil { @@ -908,10 +922,7 @@ func GrampusTrainJobShow(ctx *context.Context) { ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task) ctx.Data["displayJobName"] = task.DisplayJobName - aiCenterInfo := strings.Split(task.AiCenter, "+") - if len(aiCenterInfo) == 2 { - ctx.Data["ai_center"] = aiCenterInfo[1] - } + ctx.Data["ai_center"] = cloudbrainService.GetAiCenterShow(task.AiCenter, ctx) ctx.HTML(http.StatusOK, tplGrampusTrainJobShow) } @@ -967,15 +978,18 @@ func GrampusGetLog(ctx *context.Context) { return } -func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bootFile, paramSrc, outputRemotePath, datasetName, pretrainModelPath, pretrainModelFileName string) (string, error) { +func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bootFile, paramSrc, outputRemotePath, datasetName, pretrainModelPath, pretrainModelFileName, modelRemoteObsUrl string) (string, error) { var command string + //prepare workDir := grampus.NpuWorkDir - if processorType == grampus.ProcessorTypeGPU { + if processorType == grampus.ProcessorTypeNPU { + command += "pwd;cd " + workDir + grampus.CommandPrepareScriptNpu + } else if processorType == grampus.ProcessorTypeGPU { workDir = grampus.GpuWorkDir + command += "pwd;cd " + workDir + fmt.Sprintf(grampus.CommandPrepareScriptGpu, setting.Grampus.SyncScriptProject, setting.Grampus.SyncScriptProject) } - command += "pwd;cd " + workDir + fmt.Sprintf(grampus.CommandPrepareScript, setting.Grampus.SyncScriptProject, setting.Grampus.SyncScriptProject) //download code & dataset if processorType == grampus.ProcessorTypeNPU { //no need to download code & dataset by internet @@ -990,7 +1004,7 @@ func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bo //no need to process } else if processorType == grampus.ProcessorTypeGPU { unZipDatasetCommand := generateDatasetUnzipCommand(datasetName) - commandUnzip := "cd " + workDir + "code;unzip -q 
master.zip;echo \"start to unzip dataset\";cd " + workDir + "dataset;" + unZipDatasetCommand + commandUnzip := "cd " + workDir + "code;unzip -q master.zip;rm -f master.zip;echo \"start to unzip dataset\";cd " + workDir + "dataset;" + unZipDatasetCommand command += commandUnzip } @@ -1024,7 +1038,8 @@ func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bo var commandCode string if processorType == grampus.ProcessorTypeNPU { - commandCode = "/bin/bash /home/work/run_train_for_openi.sh /home/work/openi.py /tmp/log/train.log" + paramCode + ";" + paramCode += " --model_url=" + modelRemoteObsUrl + commandCode = "/bin/bash /home/work/run_train_for_openi.sh /home/work/openi.py " + grampus.NpuLocalLogUrl + paramCode + ";" } else if processorType == grampus.ProcessorTypeGPU { if pretrainModelFileName != "" { paramCode += " --ckpt_url" + "=" + workDir + "pretrainmodel/" + pretrainModelFileName @@ -1040,8 +1055,7 @@ func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bo //upload models if processorType == grampus.ProcessorTypeNPU { - commandUpload := "cd " + workDir + setting.Grampus.SyncScriptProject + "/;./uploader_for_npu " + setting.Bucket + " " + outputRemotePath + " " + workDir + "output/;" - command += commandUpload + // no need to upload } else if processorType == grampus.ProcessorTypeGPU { commandUpload := "cd " + workDir + setting.Grampus.SyncScriptProject + "/;./uploader_for_gpu " + setting.Grampus.Env + " " + outputRemotePath + " " + workDir + "output/;" command += commandUpload @@ -1072,6 +1086,7 @@ func generateDatasetUnzipCommand(datasetName string) string { if strings.HasSuffix(datasetNameArray[0], ".tar.gz") { unZipDatasetCommand = "tar --strip-components=1 -zxvf '" + datasetName + "';" } + unZipDatasetCommand += "rm -f '" + datasetName + "';" } else { //多数据集 for _, datasetNameTemp := range datasetNameArray { @@ -1080,6 +1095,7 @@ func generateDatasetUnzipCommand(datasetName string) string { } else { unZipDatasetCommand = unZipDatasetCommand + "unzip -q '" + datasetNameTemp + "' -d './" + strings.TrimSuffix(datasetNameTemp, ".zip") + "';" } + unZipDatasetCommand += "rm -f '" + datasetNameTemp + "';" } } diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go index 6e44b3cd2..be59b0f3f 100755 --- a/routers/repo/modelarts.go +++ b/routers/repo/modelarts.go @@ -15,6 +15,8 @@ import ( "time" "unicode/utf8" + "code.gitea.io/gitea/services/cloudbrain/cloudbrainTask" + "code.gitea.io/gitea/modules/dataset" "code.gitea.io/gitea/modules/modelarts_cd" @@ -144,6 +146,8 @@ func notebookNewDataPrepare(ctx *context.Context) error { waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "") ctx.Data["WaitCount"] = waitCount + NotStopTaskCount, _ := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainTwo, string(models.JobTypeDebug)) + ctx.Data["NotStopTaskCount"] = NotStopTaskCount return nil } @@ -162,50 +166,6 @@ func prepareCloudbrainTwoDebugSpecs(ctx *context.Context) { ctx.Data["Specs"] = noteBookSpecs } -func NotebookCreate(ctx *context.Context, form auth.CreateModelArtsNotebookForm) { - ctx.Data["PageIsNotebook"] = true - jobName := form.JobName - uuid := form.Attachment - description := form.Description - flavor := form.Flavor - - count, err := models.GetCloudbrainNotebookCountByUserID(ctx.User.ID) - if err != nil { - log.Error("GetCloudbrainNotebookCountByUserID failed:%v", err, ctx.Data["MsgID"]) - cloudBrainNewDataPrepare(ctx) - ctx.RenderWithErr("system error", 
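Three related changes run through grampus.go: generateCommand gains an eleventh parameter (modelRemoteObsUrl) so NPU jobs pass --model_url and write the model to remote OBS themselves, the uploader_for_npu step is dropped (the model is instead fetched back with urchin.GetBackNpuModel once the job reaches a terminal state), and every archive is deleted right after extraction to reclaim container disk. A standalone sketch of the unzip-then-delete builder; it is simplified, since the real function special-cases a single .tar.gz with --strip-components:

    package main

    import (
        "fmt"
        "strings"
    )

    // buildUnzipCommand models generateDatasetUnzipCommand after the patch:
    // extract each archive, then rm -f it to reclaim container disk space.
    func buildUnzipCommand(datasetNames string) string {
        var b strings.Builder
        for _, name := range strings.Split(datasetNames, ";") {
            if strings.HasSuffix(name, ".tar.gz") {
                b.WriteString("tar -zxvf '" + name + "';")
            } else {
                b.WriteString("unzip -q '" + name + "' -d './" + strings.TrimSuffix(name, ".zip") + "';")
            }
            b.WriteString("rm -f '" + name + "';") // new in this patch
        }
        return b.String()
    }

    func main() {
        fmt.Println(buildUnzipCommand("mnist.zip;imagenet.tar.gz"))
    }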
diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go
index 6e44b3cd2..be59b0f3f 100755
--- a/routers/repo/modelarts.go
+++ b/routers/repo/modelarts.go
@@ -15,6 +15,8 @@ import (
 	"time"
 	"unicode/utf8"
 
+	"code.gitea.io/gitea/services/cloudbrain/cloudbrainTask"
+
 	"code.gitea.io/gitea/modules/dataset"
 
 	"code.gitea.io/gitea/modules/modelarts_cd"
@@ -144,6 +146,8 @@ func notebookNewDataPrepare(ctx *context.Context) error {
 	waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
 	ctx.Data["WaitCount"] = waitCount
+	NotStopTaskCount, _ := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainTwo, string(models.JobTypeDebug))
+	ctx.Data["NotStopTaskCount"] = NotStopTaskCount
 
 	return nil
 }
@@ -162,50 +166,6 @@ func prepareCloudbrainTwoDebugSpecs(ctx *context.Context) {
 	ctx.Data["Specs"] = noteBookSpecs
 }
 
-func NotebookCreate(ctx *context.Context, form auth.CreateModelArtsNotebookForm) {
-	ctx.Data["PageIsNotebook"] = true
-	jobName := form.JobName
-	uuid := form.Attachment
-	description := form.Description
-	flavor := form.Flavor
-
-	count, err := models.GetCloudbrainNotebookCountByUserID(ctx.User.ID)
-	if err != nil {
-		log.Error("GetCloudbrainNotebookCountByUserID failed:%v", err, ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
-		ctx.RenderWithErr("system error", tplModelArtsNotebookNew, &form)
-		return
-	} else {
-		if count >= 1 {
-			log.Error("the user already has running or waiting task", ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
-			ctx.RenderWithErr("you have already a running or waiting task, can not create more", tplModelArtsNotebookNew, &form)
-			return
-		}
-	}
-
-	_, err = models.GetCloudbrainByName(jobName)
-	if err == nil {
-		log.Error("the job name did already exist", ctx.Data["MsgID"])
-		cloudBrainNewDataPrepare(ctx)
-		ctx.RenderWithErr("the job name did already exist", tplModelArtsNotebookNew, &form)
-		return
-	} else {
-		if !models.IsErrJobNotExist(err) {
-			log.Error("system error, %v", err, ctx.Data["MsgID"])
-			cloudBrainNewDataPrepare(ctx)
-			ctx.RenderWithErr("system error", tplModelArtsNotebookNew, &form)
-			return
-		}
-	}
-
-	err = modelarts.GenerateTask(ctx, jobName, uuid, description, flavor)
-	if err != nil {
-		ctx.RenderWithErr(err.Error(), tplModelArtsNotebookNew, &form)
-		return
-	}
-	ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob?debugListType=all")
-}
-
 func Notebook2Create(ctx *context.Context, form auth.CreateModelArtsNotebookForm) {
 	ctx.Data["PageIsNotebook"] = true
 	displayJobName := form.DisplayJobName
@@ -225,7 +185,8 @@ func Notebook2Create(ctx *context.Context, form auth.CreateModelArtsNotebookForm
 	}
 	defer lock.UnLock()
 
-	count, err := models.GetCloudbrainNotebookCountByUserID(ctx.User.ID)
+	count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainTwo, string(models.JobTypeDebug))
+
 	if err != nil {
 		log.Error("GetCloudbrainNotebookCountByUserID failed:%v", err, ctx.Data["MsgID"])
 		notebookNewDataPrepare(ctx)
@@ -272,7 +233,7 @@ func Notebook2Create(ctx *context.Context, form auth.CreateModelArtsNotebookForm
 	}
 	if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
 		log.Error("point balance is not enough,userId=%d specId=%d ", ctx.User.ID, spec.ID)
-		cloudBrainNewDataPrepare(ctx)
+		notebookNewDataPrepare(ctx)
 		ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplModelArtsNotebookNew, &form)
 		return
 	}
@@ -450,7 +411,8 @@ func NotebookRestart(ctx *context.Context) {
 			break
 		}
 
-		count, err := models.GetCloudbrainNotebookCountByUserID(ctx.User.ID)
+		count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainTwo, string(models.JobTypeDebug))
+
 		if err != nil {
 			log.Error("GetCloudbrainNotebookCountByUserID failed:%v", err, ctx.Data["MsgID"])
 			errorMsg = "system error"
@@ -798,6 +760,8 @@ func trainJobNewDataPrepare(ctx *context.Context) error {
 	ctx.Data["datasetType"] = models.TypeCloudBrainTwo
 	waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
 	ctx.Data["WaitCount"] = waitCount
+	NotStopTaskCount, _ := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainTwo, string(models.JobTypeTrain))
+	ctx.Data["NotStopTaskCount"] = NotStopTaskCount
 
 	setMultiNodeIfConfigureMatch(ctx)
 
@@ -966,6 +930,8 @@ func trainJobNewVersionDataPrepare(ctx *context.Context) error {
 	ctx.Data["config_list"] = configList.ParaConfigs
 	waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
 	ctx.Data["WaitCount"] = waitCount
+	NotStopTaskCount, _ := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainTwo, string(models.JobTypeTrain))
+	ctx.Data["NotStopTaskCount"] = NotStopTaskCount
 
 	return nil
 }
@@ -1012,7 +978,8 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
 	}
 	defer lock.UnLock()
 
-	count, err := models.GetCloudbrainTrainJobCountByUserID(ctx.User.ID)
+	count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainTwo, string(models.JobTypeTrain))
+
 	if err != nil {
 		log.Error("GetCloudbrainTrainJobCountByUserID failed:%v", err, ctx.Data["MsgID"])
 		trainJobNewDataPrepare(ctx)
@@ -1345,6 +1312,36 @@ func getUserCommand(engineId int, req *modelarts.GenerateTrainJobReq) (string, s
 	return userCommand, userImageUrl
 }
 
+func getInfJobUserCommand(engineId int, req *modelarts.GenerateInferenceJobReq) (string, string) {
+	userImageUrl := ""
+	userCommand := ""
+	if engineId < 0 {
+		tmpCodeObsPath := strings.Trim(req.CodeObsPath, "/")
+		tmpCodeObsPaths := strings.Split(tmpCodeObsPath, "/")
+		lastCodeDir := "code"
+		if len(tmpCodeObsPaths) > 0 {
+			lastCodeDir = tmpCodeObsPaths[len(tmpCodeObsPaths)-1]
+		}
+		userCommand = "/bin/bash /home/work/run_train.sh 's3://" + req.CodeObsPath + "' '" + lastCodeDir + "/" + req.BootFile + "' '/tmp/log/train.log' --'data_url'='s3://" + req.DataUrl + "' --'train_url'='s3://" + req.TrainUrl + "'"
+		var versionInfos modelarts.VersionInfo
+		if err := json.Unmarshal([]byte(setting.EngineVersions), &versionInfos); err != nil {
+			log.Info("json parse err." + err.Error())
+		} else {
+			for _, engine := range versionInfos.Version {
+				if engine.ID == engineId {
+					userImageUrl = engine.Url
+					break
+				}
+			}
+		}
+		for _, param := range req.Parameters {
+			userCommand += " --'" + param.Label + "'='" + param.Value + "'"
+		}
+		return userCommand, userImageUrl
+	}
+	return userCommand, userImageUrl
+}
+
 func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJobForm) {
 	ctx.Data["PageIsTrainJob"] = true
 	var jobID = ctx.Params(":jobid")
@@ -1356,7 +1353,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
 		return
 	}
 
-	count, err := models.GetCloudbrainTrainJobCountByUserID(ctx.User.ID)
+	count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainTwo, string(models.JobTypeTrain))
 	if err != nil {
 		log.Error("GetCloudbrainTrainJobCountByUserID failed:%v", err, ctx.Data["MsgID"])
 		trainJobNewVersionDataPrepare(ctx)
@@ -2007,7 +2004,8 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference
 	}
 	defer lock.UnLock()
 
-	count, err := models.GetCloudbrainInferenceJobCountByUserID(ctx.User.ID)
+	count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainTwo, string(models.JobTypeInference))
+
 	if err != nil {
 		log.Error("GetCloudbrainInferenceJobCountByUserID failed:%v", err, ctx.Data["MsgID"])
 		inferenceJobErrorNewDataPrepare(ctx, form)
@@ -2203,6 +2201,10 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference
 		JobType:    string(models.JobTypeInference),
 	}
 
+	userCommand, userImageUrl := getInfJobUserCommand(engineID, req)
+	req.UserCommand = userCommand
+	req.UserImageUrl = userImageUrl
+
 	err = modelarts.GenerateInferenceJob(ctx, req)
 	if err != nil {
 		log.Error("GenerateTrainJob failed:%v", err.Error())
@@ -2409,6 +2411,8 @@ func inferenceJobNewDataPrepare(ctx *context.Context) error {
 	ctx.Data["datasetType"] = models.TypeCloudBrainTwo
 	waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
 	ctx.Data["WaitCount"] = waitCount
+	NotStopTaskCount, _ := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainTwo, string(models.JobTypeInference))
+	ctx.Data["NotStopTaskCount"] = NotStopTaskCount
 
 	return nil
 }
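The new getInfJobUserCommand only produces a command and image URL for custom images (engineId < 0); it reuses the run_train.sh entry point and appends every form parameter as --'label'='value'. A runnable sketch of that parameter-appending step, with the request types simplified to a local struct:

    package main

    import "fmt"

    type param struct{ Label, Value string }

    // appendParams mirrors the loop at the end of getInfJobUserCommand:
    // each user parameter is appended to the boot command as --'label'='value'.
    func appendParams(cmd string, params []param) string {
        for _, p := range params {
            cmd += " --'" + p.Label + "'='" + p.Value + "'"
        }
        return cmd
    }

    func main() {
        cmd := "/bin/bash /home/work/run_train.sh 's3://bucket/code' 'code/inference.py' '/tmp/log/train.log'"
        fmt.Println(appendParams(cmd, []param{{"ckpt_url", "s3://bucket/model/model.ckpt"}}))
    }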
diff --git a/routers/response/response_list.go b/routers/response/response_list.go
index 6514f3edd..bc44e9d68 100644
--- a/routers/response/response_list.go
+++ b/routers/response/response_list.go
@@ -1,5 +1,14 @@
 package response
 
+//repo response
 var RESOURCE_QUEUE_NOT_AVAILABLE = &BizError{Code: 1001, Err: "resource queue not available"}
 var SPECIFICATION_NOT_EXIST = &BizError{Code: 1002, Err: "specification not exist"}
 var SPECIFICATION_NOT_AVAILABLE = &BizError{Code: 1003, Err: "specification not available"}
+
+var CATEGORY_STILL_HAS_BADGES = &BizError{Code: 1004, Err: "Please delete badges in the category first"}
+var BADGES_STILL_HAS_USERS = &BizError{Code: 1005, Err: "Please delete users of badge first"}
+
+//common response
+var SYSTEM_ERROR = &BizError{Code: 9009, Err: "System error.Please try again later"}
+var INSUFFICIENT_PERMISSION = &BizError{Code: 9003, Err: "insufficient permissions"}
+var PARAM_ERROR = &BizError{Code: 9001, Err: "param error permissions"}
diff --git a/routers/routes/routes.go b/routers/routes/routes.go
index 9a523ea48..fd8b274e6 100755
--- a/routers/routes/routes.go
+++ b/routers/routes/routes.go
@@ -12,11 +12,14 @@ import (
 	"text/template"
 	"time"
 
-	"code.gitea.io/gitea/routers/modelapp"
+	"code.gitea.io/gitea/routers/badge"
 	"code.gitea.io/gitea/routers/reward/point"
 	"code.gitea.io/gitea/routers/task"
+	badge_service "code.gitea.io/gitea/services/badge"
 	"code.gitea.io/gitea/services/reward"
 
+	"code.gitea.io/gitea/routers/modelapp"
+
 	"code.gitea.io/gitea/modules/slideimage"
 
 	"code.gitea.io/gitea/routers/image"
@@ -196,6 +199,14 @@ func NewMacaron() *macaron.Macaron {
 		},
 	))
 	m.Use(public.StaticHandler(
+		setting.IconUploadPath,
+		&public.Options{
+			Prefix:       "icons",
+			SkipLogging:  setting.DisableRouterLog,
+			ExpiresAfter: setting.StaticCacheTime,
+		},
+	))
+	m.Use(public.StaticHandler(
 		setting.RepositoryAvatarUploadPath,
 		&public.Options{
 			Prefix: "repo-avatars",
@@ -338,6 +349,7 @@ func RegisterRoutes(m *macaron.Macaron) {
 	m.Get("/action/notification", routers.ActionNotification)
 	m.Get("/recommend/home", routers.RecommendHomeInfo)
 	m.Get("/dashboard/invitation", routers.GetMapInfo)
+	m.Get("/resource_desc", routers.HomeResoruceDesc)
 	//m.Get("/recommend/org", routers.RecommendOrgFromPromote)
 	//m.Get("/recommend/repo", routers.RecommendRepoFromPromote)
 	m.Get("/recommend/userrank/:index", routers.GetUserRankFromPromote)
@@ -521,6 +533,8 @@ func RegisterRoutes(m *macaron.Macaron) {
 
 	m.Get("/avatar/:hash", user.AvatarByEmailHash)
 
+	m.Get("/show/icon/:hash", badge.GetIcon)
+
 	adminReq := context.Toggle(&context.ToggleOptions{SignInRequired: true, AdminRequired: true})
 
 	// ***** START: Admin *****
@@ -666,6 +680,23 @@ func RegisterRoutes(m *macaron.Macaron) {
 			m.Post("/add/batch", bindIgnErr(models.BatchLimitConfigVO{}), task.BatchAddTaskConfig)
 			m.Post("/^:action(new|edit|del)$", bindIgnErr(models.TaskConfigWithLimit{}), task.OperateTaskConfig)
 		})
+
+		m.Group("/badge", func() {
+			m.Group("/category", func() {
+				m.Get("/list", badge.GetBadgeCategoryList)
+				m.Post("/^:action(new|edit|del)$", bindIgnErr(models.BadgeCategory4Show{}), badge.OperateBadgeCategory)
+			})
+			m.Group("/customize", func() {
+				m.Get("/list", badge.GetCustomizeBadgeList)
+			})
+			m.Group("/users", func() {
+				m.Get("", badge.GetBadgeUsers)
+				m.Post("/add", bindIgnErr(models.AddBadgeUsersReq{}), badge.AddOperateBadgeUsers)
+				m.Post("/del", bindIgnErr(models.DelBadgeUserReq{}), badge.DelBadgeUsers)
+			})
+			m.Post("/^:action(new|edit|del)$", bindIgnErr(models.BadgeOperateReq{}), badge.OperateBadge)
+		})
+		m.Post("/icon/upload", bindIgnErr(badge_service.IconUploadForm{}), badge.UploadIcon)
 	}, operationReq)
 	// ***** END: Operation *****
diff --git a/routers/user/home.go b/routers/user/home.go
index 1a20c26e2..b6ab28f95 100755
--- a/routers/user/home.go
+++ b/routers/user/home.go
@@ -836,12 +836,18 @@ func Cloudbrains(ctx *context.Context) {
 		return
 	}
 	models.LoadSpecs4CloudbrainInfo(ciTasks)
-	for i, task := range ciTasks {
+	for i, _ := range ciTasks {
 		ciTasks[i].CanDebug = true
 		ciTasks[i].CanDel = true
-		ciTasks[i].Cloudbrain.ComputeResource = task.ComputeResource
+		ciTasks[i].Cloudbrain.ComputeResource = ciTasks[i].ComputeResource
+		if ciTasks[i].Cloudbrain.Spec != nil {
+			if ciTasks[i].Cloudbrain.Type == models.TypeC2Net {
+				ciTasks[i].Cloudbrain.Spec.Cluster = models.C2NetCluster
+			} else {
+				ciTasks[i].Cloudbrain.Spec.Cluster = models.OpenICluster
+			}
+		}
 	}
-
 	pager := context.NewPagination(int(count), setting.UI.IssuePagingNum, page, getTotalPage(count, setting.UI.IssuePagingNum))
 	pager.SetDefaultParams(ctx)
 	pager.AddParam(ctx, "listType", "ListType")
diff --git a/routers/user/profile.go b/routers/user/profile.go
index 42cdfd1a8..66a480b7f 100755
--- a/routers/user/profile.go
+++ b/routers/user/profile.go
@@ -6,6 +6,7 @@ package user
 import (
+	"code.gitea.io/gitea/services/badge"
 	"errors"
 	"fmt"
 	"path"
@@ -90,10 +91,25 @@ func Profile(ctx *context.Context) {
 		return
 	}
 
+	// Show user badges
+	badges, err := badge.GetUserBadges(ctxUser.ID, models.ListOptions{Page: 1, PageSize: 5})
+	if err != nil {
+		ctx.ServerError("GetUserBadges", err)
+		return
+	}
+	// Count user badges
+	cnt, err := badge.CountUserBadges(ctxUser.ID)
+	if err != nil {
+		ctx.ServerError("CountUserBadges", err)
+		return
+	}
+
 	ctx.Data["Title"] = ctxUser.DisplayName()
 	ctx.Data["PageIsUserProfile"] = true
 	ctx.Data["Owner"] = ctxUser
 	ctx.Data["OpenIDs"] = openIDs
+	ctx.Data["RecentBadges"] = badges
+	ctx.Data["TotalBadges"] = cnt
 	ctx.Data["EnableHeatmap"] = setting.Service.EnableUserHeatmap
 	ctx.Data["HeatmapUser"] = ctxUser.Name
 	showPrivate := ctx.IsSigned && (ctx.User.IsAdmin || ctx.User.ID == ctxUser.ID)
@@ -297,6 +313,13 @@ func Profile(ctx *context.Context) {
 		}
 		total = int(count)
+	case "badge":
+		allBadges, err := badge.GetUserAllBadges(ctxUser.ID)
+		if err != nil {
+			ctx.ServerError("GetUserAllBadges", err)
+			return
+		}
+		ctx.Data["AllBadges"] = allBadges
 	default:
 		ctx.ServerError("tab error", errors.New("tab error"))
 		return
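The response list groups coded BizErrors: 1xxx codes for domain errors (resource queues, specifications, badges) and 9xxx codes for common ones. The type itself is defined elsewhere in routers/response; a minimal stand-in with field names inferred from the literals above shows the shape:

    package main

    import "fmt"

    // BizError as the response list uses it; the real type (and how it is
    // rendered into an API response) lives elsewhere in routers/response.
    type BizError struct {
        Code int
        Err  string
    }

    func (e *BizError) Error() string { return fmt.Sprintf("[%d] %s", e.Code, e.Err) }

    var CATEGORY_STILL_HAS_BADGES = &BizError{Code: 1004, Err: "Please delete badges in the category first"}

    func main() {
        fmt.Println(CATEGORY_STILL_HAS_BADGES) // [1004] Please delete badges in the category first
    }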
diff --git a/services/admin/operate_log/operate_log.go b/services/admin/operate_log/operate_log.go
index 7b72ec2e2..f52950351 100644
--- a/services/admin/operate_log/operate_log.go
+++ b/services/admin/operate_log/operate_log.go
@@ -4,6 +4,13 @@ import (
 	"code.gitea.io/gitea/models"
 )
 
+type LogBizType string
+
+const (
+	BadgeCategoryOperate LogBizType = "BadgeCategoryOperate"
+	BadgeOperate         LogBizType = "BadgeOperate"
+)
+
 func Log(log models.AdminOperateLog) error {
 	_, err := models.InsertAdminOperateLog(log)
 	return err
@@ -12,3 +19,34 @@ func Log(log models.AdminOperateLog) error {
 func NewLogValues() *models.LogValues {
 	return &models.LogValues{Params: make([]models.LogValue, 0)}
 }
+
+func Log4Add(bizType LogBizType, newValue interface{}, doerId int64, comment string) {
+	Log(models.AdminOperateLog{
+		BizType:     string(bizType),
+		OperateType: "add",
+		NewValue:    NewLogValues().Add("new", newValue).JsonString(),
+		CreatedBy:   doerId,
+		Comment:     comment,
+	})
+}
+
+func Log4Edit(bizType LogBizType, oldValue interface{}, newValue interface{}, doerId int64, comment string) {
+	Log(models.AdminOperateLog{
+		BizType:     string(bizType),
+		OperateType: "edit",
+		NewValue:    NewLogValues().Add("new", newValue).JsonString(),
+		OldValue:    NewLogValues().Add("old", oldValue).JsonString(),
+		CreatedBy:   doerId,
+		Comment:     comment,
+	})
+}
+
+func Log4Del(bizType LogBizType, oldValue interface{}, doerId int64, comment string) {
+	Log(models.AdminOperateLog{
+		BizType:     string(bizType),
+		OperateType: "del",
+		OldValue:    NewLogValues().Add("old", oldValue).JsonString(),
+		CreatedBy:   doerId,
+		Comment:     comment,
+	})
+}
diff --git a/services/badge/badge.go b/services/badge/badge.go
new file mode 100644
index 000000000..c6f833f65
--- /dev/null
+++ b/services/badge/badge.go
@@ -0,0 +1,80 @@
+package badge
+
+import (
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/routers/response"
+	"code.gitea.io/gitea/services/admin/operate_log"
+	"errors"
+)
+
+func GetBadgeList(opts models.GetBadgeOpts) (int64, []*models.Badge4AdminShow, error) {
+	total, list, err := models.GetBadgeList(opts)
+	if err != nil {
+		return 0, nil, err
+	}
+	if len(list) == 0 {
+		return 0, nil, nil
+	}
+	r := make([]*models.Badge4AdminShow, len(list))
+	for i := 0; i < len(list); i++ {
+		r[i] = list[i].ToShow()
+	}
+
+	return total, r, nil
+}
+
+func AddBadge(m models.BadgeOperateReq, doer *models.User) *response.BizError {
+	_, err := models.GetBadgeCategoryById(m.CategoryId)
+
+	if err != nil {
+		if models.IsErrRecordNotExist(err) {
+			return response.NewBizError(errors.New("badge category is not available"))
+		}
+		return response.NewBizError(err)
+	}
+	_, err = models.AddBadge(m.ToDTO())
+	if err != nil {
+		return response.NewBizError(err)
+	}
+	operate_log.Log4Add(operate_log.BadgeOperate, m, doer.ID, "新增了勋章")
+	return nil
+}
+
+func EditBadge(m models.BadgeOperateReq, doer *models.User) *response.BizError {
+	if m.ID == 0 {
+		log.Error(" EditBadge param error")
+		return response.NewBizError(errors.New("param error"))
+	}
+	old, err := models.GetBadgeById(m.ID)
+	if err != nil {
+		return response.NewBizError(err)
+	}
+	_, err = models.UpdateBadgeById(m.ID, m.ToDTO())
+	if err != nil {
+		return response.NewBizError(err)
+	}
+	operate_log.Log4Edit(operate_log.BadgeOperate, old, m.ToDTO(), doer.ID, "修改了勋章")
+	return nil
+}
+
+func DelBadge(id int64, doer *models.User) *response.BizError {
+	if id == 0 {
+		log.Error(" DelBadge param error")
+		return response.NewBizError(errors.New("param error"))
+	}
+	old, err := models.GetBadgeById(id)
+	if err != nil {
+		return response.NewBizError(err)
+	}
+	n, _, err := models.GetBadgeUsers(id, models.ListOptions{PageSize: 1, Page: 1})
+	if err != nil {
+		return response.NewBizError(err)
+	}
+	if n > 0 {
+		return response.BADGES_STILL_HAS_USERS
+	}
+	_, err = models.DelBadge(id)
+	operate_log.Log4Del(operate_log.BadgeOperate, old, doer.ID, "删除了勋章")
+	return nil
+}
diff --git a/services/badge/category.go b/services/badge/category.go
new file mode 100644
index 000000000..445dedcad
--- /dev/null
+++ b/services/badge/category.go
@@ -0,0 +1,72 @@
+package badge
+
+import (
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/routers/response"
+	"code.gitea.io/gitea/services/admin/operate_log"
+	"errors"
+)
+
+func GetBadgeCategoryList(opts models.ListOptions) (int64, []*models.BadgeCategory4Show, error) {
+	total, list, err := models.GetBadgeCategoryListPaging(opts)
+	if err != nil {
+		return 0, nil, err
+	}
+	if len(list) == 0 {
+		return 0, nil, nil
+	}
+	r := make([]*models.BadgeCategory4Show, len(list))
+	for i := 0; i < len(list); i++ {
+		r[i] = list[i].ToShow()
+	}
+
+	return total, r, nil
+}
+
+func AddBadgeCategory(m models.BadgeCategory4Show, doer *models.User) *response.BizError {
+	_, err := models.AddBadgeCategory(m.ToDTO())
+	if err != nil {
+		return response.NewBizError(err)
+	}
+	operate_log.Log4Add(operate_log.BadgeCategoryOperate, m, doer.ID, "新增了勋章分类")
+	return nil
+}
+
+func EditBadgeCategory(m models.BadgeCategory4Show, doer *models.User) *response.BizError {
+	if m.ID == 0 {
+		log.Error(" EditBadgeCategory param error")
+		return response.NewBizError(errors.New("param error"))
+	}
+	old, err := models.GetBadgeCategoryById(m.ID)
+	if err != nil {
+		return response.NewBizError(err)
+	}
+	_, err = models.UpdateBadgeCategoryById(m.ID, m.ToDTO())
+	if err != nil {
+		return response.NewBizError(err)
+	}
+	operate_log.Log4Edit(operate_log.BadgeCategoryOperate, old, m.ToDTO(), doer.ID, "修改了勋章分类")
+	return nil
+}
+
+func DelBadgeCategory(id int64, doer *models.User) *response.BizError {
+	if id == 0 {
+		log.Error(" DelBadgeCategory param error")
+		return response.NewBizError(errors.New("param error"))
+	}
+	old, err := models.GetBadgeCategoryById(id)
+	if err != nil {
+		return response.NewBizError(err)
+	}
+	badges, err := models.GetBadgeByCategoryId(id)
+	if err != nil {
+		return response.NewBizError(err)
+	}
+	if len(badges) > 0 {
+		return response.CATEGORY_STILL_HAS_BADGES
+	}
+	_, err = models.DelBadgeCategory(id)
+	operate_log.Log4Del(operate_log.BadgeCategoryOperate, old, doer.ID, "删除了勋章分类")
+	return nil
+}
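Log4Add, Log4Edit and Log4Del wrap the admin operate log so every badge mutation records who acted and, for edits and deletes, the old and new values serialized as JSON. The badge and category services call them after each successful write; a hedged example of a call site, where oldBadge, newBadge and doer stand in for values in scope in the real service:

    // As services/badge does after a successful update; the Chinese comment
    // string ("edited a badge") is the literal the service records.
    operate_log.Log4Edit(operate_log.BadgeOperate, oldBadge, newBadge, doer.ID, "修改了勋章")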
diff --git a/services/badge/icon.go b/services/badge/icon.go
new file mode 100644
index 000000000..fd731b586
--- /dev/null
+++ b/services/badge/icon.go
@@ -0,0 +1,140 @@
+package badge
+
+import (
+	"bytes"
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/base"
+	"code.gitea.io/gitea/modules/setting"
+	"crypto/md5"
+	"errors"
+	"fmt"
+	"github.com/nfnt/resize"
+	"github.com/oliamb/cutter"
+	"image"
+	"image/png"
+	"io/ioutil"
+	"mime/multipart"
+	"os"
+)
+
+type IconUploader struct {
+	Config IconUploadConfig
+}
+
+type IconUploadForm struct {
+	Icon *multipart.FileHeader
+}
+
+type IconUploadConfig struct {
+	FileMaxSize   int64
+	FileMaxWidth  int
+	FileMaxHeight int
+	DefaultSize   uint
+	NeedResize    bool
+	NeedSquare    bool
+}
+
+func NewIconUploader(config IconUploadConfig) IconUploader {
+	return IconUploader{Config: config}
+}
+
+func (u IconUploader) Upload(form IconUploadForm, user *models.User) (string, error) {
+	if form.Icon == nil || form.Icon.Filename == "" {
+		return "", errors.New("File or fileName is empty")
+	}
+
+	fr, err := form.Icon.Open()
+	if err != nil {
+		return "", fmt.Errorf("Icon.Open: %v", err)
+	}
+	defer fr.Close()
+
+	if form.Icon.Size > u.Config.FileMaxSize {
+		return "", errors.New("File is too large")
+	}
+
+	data, err := ioutil.ReadAll(fr)
+	if err != nil {
+		return "", fmt.Errorf("ioutil.ReadAll: %v", err)
+	}
+	if !base.IsImageFile(data) {
+		return "", errors.New("File is not a image")
+	}
+	iconName, err := u.uploadIcon(data, user.ID)
+	if err != nil {
+		return "", fmt.Errorf("uploadIcon: %v", err)
+	}
+	return iconName, nil
+
+}
+
+func (u IconUploader) uploadIcon(data []byte, userId int64) (string, error) {
+	m, err := u.prepare(data)
+	if err != nil {
+		return "", err
+	}
+
+	iconName := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%d-%x", userId, md5.Sum(data)))))
+
+	if err := os.MkdirAll(setting.IconUploadPath, os.ModePerm); err != nil {
+		return "", fmt.Errorf("uploadIcon. Failed to create dir %s: %v", setting.AvatarUploadPath, err)
+	}
+
+	fw, err := os.Create(models.GetCustomIconByHash(iconName))
+	if err != nil {
+		return "", fmt.Errorf("Create: %v", err)
+	}
+	defer fw.Close()
+
+	if err = png.Encode(fw, *m); err != nil {
+		return "", fmt.Errorf("Encode: %v", err)
+	}
+
+	return iconName, nil
+}
+
+func (u IconUploader) prepare(data []byte) (*image.Image, error) {
+	imgCfg, _, err := image.DecodeConfig(bytes.NewReader(data))
+	if err != nil {
+		return nil, fmt.Errorf("DecodeConfig: %v", err)
+	}
+	if imgCfg.Width > u.Config.FileMaxWidth {
+		return nil, fmt.Errorf("Image width is too large: %d > %d", imgCfg.Width, setting.AvatarMaxWidth)
+	}
+	if imgCfg.Height > u.Config.FileMaxHeight {
+		return nil, fmt.Errorf("Image height is too large: %d > %d", imgCfg.Height, setting.AvatarMaxHeight)
+	}
+
+	img, _, err := image.Decode(bytes.NewReader(data))
+	if err != nil {
+		return nil, fmt.Errorf("Decode: %v", err)
+	}
+
+	if u.Config.NeedSquare {
+		if imgCfg.Width != imgCfg.Height {
+			var newSize, ax, ay int
+			if imgCfg.Width > imgCfg.Height {
+				newSize = imgCfg.Height
+				ax = (imgCfg.Width - imgCfg.Height) / 2
+			} else {
+				newSize = imgCfg.Width
+				ay = (imgCfg.Height - imgCfg.Width) / 2
+			}
+
+			img, err = cutter.Crop(img, cutter.Config{
+				Width:  newSize,
+				Height: newSize,
+				Anchor: image.Point{ax, ay},
+			})
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	if u.Config.NeedResize && u.Config.DefaultSize > 0 {
+		img = resize.Resize(u.Config.DefaultSize, u.Config.DefaultSize, img, resize.NearestNeighbor)
+	}
+
+	return &img, nil
+}
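IconUploader.prepare validates dimensions, optionally center-crops to a square with github.com/oliamb/cutter, then resizes to DefaultSize. The only subtle part is the crop anchor arithmetic; a standalone check of it:

    package main

    import (
        "fmt"
        "image"
    )

    // squareAnchor reproduces the center-crop anchor used by IconUploader.prepare:
    // crop the longer dimension symmetrically so the result is newSize×newSize.
    func squareAnchor(w, h int) (newSize int, anchor image.Point) {
        if w > h {
            return h, image.Point{X: (w - h) / 2}
        }
        return w, image.Point{Y: (h - w) / 2}
    }

    func main() {
        size, at := squareAnchor(300, 200)
        fmt.Println(size, at) // 200 (50,0): trim 50px from each side
    }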
e=%v", m, err) + continue + } + successCount++ + } + return successCount, nil +} + +func DelBadgeUser(id int64) error { + _, err := models.DelBadgeUser(id) + return err +} + +//GetUserBadges Only Returns badges the user has earned +func GetUserBadges(userId int64, opts models.ListOptions) ([]*models.Badge4UserShow, error) { + badges, err := models.GetUserBadgesPaging(userId, models.GetUserBadgesOpts{ListOptions: opts}) + if err != nil { + return nil, err + } + r := make([]*models.Badge4UserShow, len(badges)) + for i, v := range badges { + r[i] = v.ToUserShow() + } + return r, nil +} + +func CountUserBadges(userId int64) (int64, error) { + return models.CountUserBadges(userId) +} + +func GetUserAllBadges(userId int64) ([]models.UserAllBadgeInCategory, error) { + categoryList, err := models.GetBadgeCategoryList() + if err != nil { + return nil, err + } + r := make([]models.UserAllBadgeInCategory, 0) + for _, v := range categoryList { + badges, err := models.GetBadgeByCategoryId(v.ID) + if badges == nil || len(badges) == 0 { + continue + } + userBadgeMap, err := getUserBadgesMap(userId, v.ID) + if err != nil { + return nil, err + } + t := models.UserAllBadgeInCategory{ + CategoryName: v.Name, + CategoryId: v.ID, + LightedNum: len(userBadgeMap), + } + bArray := make([]*models.BadgeShowWithStatus, len(badges)) + for j, v := range badges { + b := &models.BadgeShowWithStatus{Badge: v.ToUserShow()} + if _, has := userBadgeMap[v.ID]; has { + b.IsLighted = true + } + bArray[j] = b + } + t.Badges = bArray + r = append(r, t) + } + return r, nil +} + +func getUserBadgesMap(userId, categoryId int64) (map[int64]*models.Badge, error) { + userBadges, err := models.GetUserBadges(userId, categoryId) + if err != nil { + return nil, err + } + m := make(map[int64]*models.Badge, 0) + for _, v := range userBadges { + m[v.ID] = v + } + return m, nil +} diff --git a/services/cloudbrain/cloudbrainTask/count.go b/services/cloudbrain/cloudbrainTask/count.go new file mode 100644 index 000000000..a9b254618 --- /dev/null +++ b/services/cloudbrain/cloudbrainTask/count.go @@ -0,0 +1,86 @@ +package cloudbrainTask + +import ( + "fmt" + "strconv" + + "code.gitea.io/gitea/models" +) + +type StatusInfo struct { + CloudBrainTypes []int + JobType []models.JobType + NotFinalStatuses []string + ComputeResource string +} + +var cloudbrainOneNotFinalStatuses = []string{string(models.JobWaiting), string(models.JobRunning)} +var cloudbrainTwoNotFinalStatuses = []string{string(models.ModelArtsTrainJobInit), string(models.ModelArtsTrainJobImageCreating), string(models.ModelArtsTrainJobSubmitTrying), string(models.ModelArtsTrainJobWaiting), string(models.ModelArtsTrainJobRunning), string(models.ModelArtsTrainJobScaling), string(models.ModelArtsTrainJobCheckInit), string(models.ModelArtsTrainJobCheckRunning), string(models.ModelArtsTrainJobCheckRunningCompleted)} +var grampusTwoNotFinalStatuses = []string{models.GrampusStatusWaiting, models.GrampusStatusRunning} +var StatusInfoDict = map[string]StatusInfo{string(models.JobTypeDebug) + "-" + strconv.Itoa(models.TypeCloudBrainOne): { + CloudBrainTypes: []int{models.TypeCloudBrainOne}, + JobType: []models.JobType{models.JobTypeDebug}, + NotFinalStatuses: cloudbrainOneNotFinalStatuses, + ComputeResource: models.GPUResource, +}, string(models.JobTypeTrain) + "-" + strconv.Itoa(models.TypeCloudBrainOne): { + CloudBrainTypes: []int{models.TypeCloudBrainOne}, + JobType: []models.JobType{models.JobTypeTrain}, + NotFinalStatuses: cloudbrainOneNotFinalStatuses, + ComputeResource: models.GPUResource, 
diff --git a/services/cloudbrain/cloudbrainTask/count.go b/services/cloudbrain/cloudbrainTask/count.go
new file mode 100644
index 000000000..a9b254618
--- /dev/null
+++ b/services/cloudbrain/cloudbrainTask/count.go
@@ -0,0 +1,86 @@
+package cloudbrainTask
+
+import (
+	"fmt"
+	"strconv"
+
+	"code.gitea.io/gitea/models"
+)
+
+// StatusInfo groups the filter criteria used to count a user's unfinished cloudbrain tasks.
+type StatusInfo struct {
+	CloudBrainTypes  []int
+	JobType          []models.JobType
+	NotFinalStatuses []string
+	ComputeResource  string
+}
+
+var cloudbrainOneNotFinalStatuses = []string{string(models.JobWaiting), string(models.JobRunning)}
+var cloudbrainTwoNotFinalStatuses = []string{string(models.ModelArtsTrainJobInit), string(models.ModelArtsTrainJobImageCreating), string(models.ModelArtsTrainJobSubmitTrying), string(models.ModelArtsTrainJobWaiting), string(models.ModelArtsTrainJobRunning), string(models.ModelArtsTrainJobScaling), string(models.ModelArtsTrainJobCheckInit), string(models.ModelArtsTrainJobCheckRunning), string(models.ModelArtsTrainJobCheckRunningCompleted)}
+var grampusNotFinalStatuses = []string{models.GrampusStatusWaiting, models.GrampusStatusRunning}
+// StatusInfoDict maps a "jobType-cloudbrainType[-computeResource]" key to the matching filter criteria.
+var StatusInfoDict = map[string]StatusInfo{string(models.JobTypeDebug) + "-" + strconv.Itoa(models.TypeCloudBrainOne): {
+	CloudBrainTypes:  []int{models.TypeCloudBrainOne},
+	JobType:          []models.JobType{models.JobTypeDebug},
+	NotFinalStatuses: cloudbrainOneNotFinalStatuses,
+	ComputeResource:  models.GPUResource,
+}, string(models.JobTypeTrain) + "-" + strconv.Itoa(models.TypeCloudBrainOne): {
+	CloudBrainTypes:  []int{models.TypeCloudBrainOne},
+	JobType:          []models.JobType{models.JobTypeTrain},
+	NotFinalStatuses: cloudbrainOneNotFinalStatuses,
+	ComputeResource:  models.GPUResource,
+}, string(models.JobTypeInference) + "-" + strconv.Itoa(models.TypeCloudBrainOne): {
+	CloudBrainTypes:  []int{models.TypeCloudBrainOne},
+	JobType:          []models.JobType{models.JobTypeInference},
+	NotFinalStatuses: cloudbrainOneNotFinalStatuses,
+	ComputeResource:  models.GPUResource,
+}, string(models.JobTypeBenchmark) + "-" + strconv.Itoa(models.TypeCloudBrainOne): {
+	CloudBrainTypes:  []int{models.TypeCloudBrainOne},
+	JobType:          []models.JobType{models.JobTypeBenchmark, models.JobTypeBrainScore, models.JobTypeSnn4imagenet},
+	NotFinalStatuses: cloudbrainOneNotFinalStatuses,
+	ComputeResource:  models.GPUResource,
+}, string(models.JobTypeDebug) + "-" + strconv.Itoa(models.TypeCloudBrainTwo): {
+	CloudBrainTypes:  []int{models.TypeCloudBrainTwo, models.TypeCDCenter},
+	JobType:          []models.JobType{models.JobTypeDebug},
+	NotFinalStatuses: []string{string(models.ModelArtsCreateQueue), string(models.ModelArtsCreating), string(models.ModelArtsStarting), string(models.ModelArtsReadyToStart), string(models.ModelArtsResizing), string(models.ModelArtsStartQueuing), string(models.ModelArtsRunning), string(models.ModelArtsRestarting)},
+	ComputeResource:  models.NPUResource,
+}, string(models.JobTypeTrain) + "-" + strconv.Itoa(models.TypeCloudBrainTwo): {
+	CloudBrainTypes:  []int{models.TypeCloudBrainTwo},
+	JobType:          []models.JobType{models.JobTypeTrain},
+	NotFinalStatuses: cloudbrainTwoNotFinalStatuses,
+	ComputeResource:  models.NPUResource,
+}, string(models.JobTypeInference) + "-" + strconv.Itoa(models.TypeCloudBrainTwo): {
+	CloudBrainTypes:  []int{models.TypeCloudBrainTwo},
+	JobType:          []models.JobType{models.JobTypeInference},
+	NotFinalStatuses: cloudbrainTwoNotFinalStatuses,
+	ComputeResource:  models.NPUResource,
+}, string(models.JobTypeTrain) + "-" + strconv.Itoa(models.TypeC2Net) + "-" + models.GPUResource: {
+	CloudBrainTypes:  []int{models.TypeC2Net},
+	JobType:          []models.JobType{models.JobTypeTrain},
+	NotFinalStatuses: grampusNotFinalStatuses,
+	ComputeResource:  models.GPUResource,
+}, string(models.JobTypeTrain) + "-" + strconv.Itoa(models.TypeC2Net) + "-" + models.NPUResource: {
+	CloudBrainTypes:  []int{models.TypeC2Net},
+	JobType:          []models.JobType{models.JobTypeTrain},
+	NotFinalStatuses: grampusNotFinalStatuses,
+	ComputeResource:  models.NPUResource,
+}}
+
+// GetNotFinalStatusTaskCount counts the user's tasks that have not yet reached a final status.
+func GetNotFinalStatusTaskCount(uid int64, cloudbrainType int, jobType string, computeResource ...string) (int, error) {
+	jobNewType := jobType
+	if jobType == string(models.JobTypeSnn4imagenet) || jobType == string(models.JobTypeBrainScore) {
+		jobNewType = string(models.JobTypeBenchmark)
+	}
+
+	key := jobNewType + "-" + strconv.Itoa(cloudbrainType)
+	if len(computeResource) > 0 {
+		key = key + "-" + computeResource[0]
+	}
+
+	if statusInfo, ok := StatusInfoDict[key]; ok {
+		return models.GetNotFinalStatusTaskCount(uid, statusInfo.NotFinalStatuses, statusInfo.JobType, statusInfo.CloudBrainTypes, statusInfo.ComputeResource)
+	} else {
+		return 0, fmt.Errorf("can not find the status info for key %q", key)
+	}
+}
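
Again outside the patch: a sketch of how a job-creation endpoint could gate new tasks with GetNotFinalStatusTaskCount. The one-task limit and the guard function are assumptions; note that for TypeC2Net callers the variadic computeResource argument (e.g. models.GPUResource) selects the GPU or NPU entry of StatusInfoDict.

```go
package routers

import (
	"net/http"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/services/cloudbrain/cloudbrainTask"
)

const maxUnfinishedDebugTasks = 1 // assumed policy, not taken from the patch

// checkDebugTaskLimit is a hypothetical guard for a "create debug task" route.
// It reports whether the request may proceed, writing the response on refusal.
func checkDebugTaskLimit(ctx *context.Context) bool {
	count, err := cloudbrainTask.GetNotFinalStatusTaskCount(ctx.User.ID, models.TypeCloudBrainOne, string(models.JobTypeDebug))
	if err != nil {
		ctx.ServerError("GetNotFinalStatusTaskCount", err)
		return false
	}
	if count >= maxUnfinishedDebugTasks {
		ctx.JSON(http.StatusConflict, map[string]string{"error": "an unfinished debug task already exists"})
		return false
	}
	return true
}
```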
diff --git a/services/cloudbrain/util.go b/services/cloudbrain/util.go
new file mode 100644
index 000000000..ab738927e
--- /dev/null
+++ b/services/cloudbrain/util.go
@@ -0,0 +1,27 @@
+package cloudbrain
+
+import (
+	"strings"
+
+	"code.gitea.io/gitea/modules/context"
+	"code.gitea.io/gitea/modules/setting"
+)
+
+// GetAiCenterShow resolves the display name of an AI center. The aiCenter
+// argument is expected in the form "id+name"; when the id is present in
+// setting.C2NetMapInfo the localized name is returned, otherwise the raw name.
+func GetAiCenterShow(aiCenter string, ctx *context.Context) string {
+	aiCenterInfo := strings.Split(aiCenter, "+")
+	if len(aiCenterInfo) != 2 {
+		return ""
+	}
+	if setting.C2NetMapInfo != nil {
+		if info, ok := setting.C2NetMapInfo[aiCenterInfo[0]]; ok {
+			if ctx.Language() == "zh-CN" {
+				return info.Content
+			}
+			return info.ContentEN
+		}
+	}
+	return aiCenterInfo[1]
+}
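
For illustration only, a self-contained sketch of the "id+name" convention GetAiCenterShow parses, using a stand-in map instead of setting.C2NetMapInfo; the struct and example values here are assumptions, not part of the patch.

```go
package main

import (
	"fmt"
	"strings"
)

// centerInfo stands in for the value type of setting.C2NetMapInfo.
type centerInfo struct {
	Content   string // zh-CN display name
	ContentEN string // English display name
}

// aiCenterShow mirrors the lookup in GetAiCenterShow without the web context.
func aiCenterShow(aiCenter, lang string, mapInfo map[string]centerInfo) string {
	parts := strings.Split(aiCenter, "+")
	if len(parts) != 2 {
		return "" // malformed input: no "id+name" pair
	}
	if info, ok := mapInfo[parts[0]]; ok {
		if lang == "zh-CN" {
			return info.Content
		}
		return info.ContentEN
	}
	// Unknown id: fall back to the raw name carried after the "+".
	return parts[1]
}

func main() {
	m := map[string]centerInfo{"pcl": {Content: "鹏城实验室", ContentEN: "Pengcheng Lab"}}
	fmt.Println(aiCenterShow("pcl+raw name", "zh-CN", m))   // 鹏城实验室
	fmt.Println(aiCenterShow("other+raw name", "en-US", m)) // raw name
}
```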
diff --git a/templates/admin/cloudbrain/list.tmpl b/templates/admin/cloudbrain/list.tmpl
index 97d968954..b6dfec77d 100755
--- a/templates/admin/cloudbrain/list.tmpl
+++ b/templates/admin/cloudbrain/list.tmpl
@@ -283,7 +283,7 @@ {{if eq .JobType "TRAIN"}}
-
+